Merge branches 'arm/rockchip', 'arm/exynos', 'arm/smmu', 'x86/vt-d', 'x86/amd', ...
author     Joerg Roedel <jroedel@suse.de>
           Fri, 19 Jun 2015 15:17:47 +0000 (17:17 +0200)
committer  Joerg Roedel <jroedel@suse.de>
           Fri, 19 Jun 2015 15:17:47 +0000 (17:17 +0200)
429 files changed:
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt
Documentation/devicetree/bindings/usb/renesas_usbhs.txt
Documentation/hwmon/tmp401
Documentation/kernel-parameters.txt
Documentation/networking/udplite.txt
Documentation/target/tcmu-design.txt
MAINTAINERS
Makefile
arch/alpha/boot/Makefile
arch/alpha/boot/main.c
arch/alpha/boot/stdio.c [new file with mode: 0644]
arch/alpha/boot/tools/objstrip.c
arch/alpha/include/asm/types.h
arch/alpha/include/asm/unistd.h
arch/alpha/include/uapi/asm/unistd.h
arch/alpha/kernel/err_ev6.c
arch/alpha/kernel/irq.c
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/process.c
arch/alpha/kernel/smp.c
arch/alpha/kernel/srmcons.c
arch/alpha/kernel/sys_marvel.c
arch/alpha/kernel/systbls.S
arch/alpha/kernel/traps.c
arch/alpha/oprofile/op_model_ev4.c
arch/alpha/oprofile/op_model_ev5.c
arch/alpha/oprofile/op_model_ev6.c
arch/alpha/oprofile/op_model_ev67.c
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-bone-common.dtsi
arch/arm/boot/dts/am335x-boneblack.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/am35xx-clocks.dtsi
arch/arm/boot/dts/armada-xp-linksys-mamba.dts
arch/arm/boot/dts/dm816x.dtsi
arch/arm/boot/dts/exynos4412-trats2.dts
arch/arm/boot/dts/imx27.dtsi
arch/arm/boot/dts/omap3-devkit8000.dts
arch/arm/boot/dts/omap3-n900.dts
arch/arm/configs/multi_v7_defconfig
arch/arm/kernel/entry-common.S
arch/arm/kernel/perf_event_cpu.c
arch/arm/mach-exynos/suspend.c
arch/arm/mach-imx/gpc.c
arch/arm/mach-omap2/sleep34xx.S
arch/arm/mach-pxa/pxa_cplds_irqs.c
arch/arm/mm/mmu.c
arch/arm64/boot/dts/mediatek/mt8173-evb.dts
arch/blackfin/include/asm/io.h
arch/ia64/kernel/smpboot.c
arch/ia64/pci/pci.c
arch/mips/ath79/prom.c
arch/mips/ath79/setup.c
arch/mips/cobalt/Makefile
arch/mips/configs/fuloong2e_defconfig
arch/mips/include/asm/pgtable-bits.h
arch/mips/include/asm/switch_to.h
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/irq.c
arch/mips/kernel/smp-bmips.c
arch/mips/kvm/emulate.c
arch/mips/lib/strnlen_user.S
arch/mips/loongson/common/Makefile
arch/mips/loongson/loongson-3/smp.c
arch/mips/mm/c-r4k.c
arch/mips/net/bpf_jit.c
arch/mips/ralink/ill_acc.c
arch/s390/net/bpf_jit.h
arch/s390/net/bpf_jit_comp.c
arch/score/lib/string.S
arch/sparc/include/asm/cpudata_64.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/topology_64.h
arch/sparc/include/asm/trap_block.h
arch/sparc/kernel/entry.h
arch/sparc/kernel/leon_pci_grpci2.c
arch/sparc/kernel/mdesc.c
arch/sparc/kernel/pci.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/mm/init_64.c
arch/x86/boot/compressed/misc.h
arch/x86/include/asm/ptrace.h
arch/x86/include/asm/segment.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_intel_pt.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/cpu/perf_event_intel_uncore.h
arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
arch/x86/kernel/head64.c
arch/x86/kernel/head_32.S
arch/x86/kernel/head_64.S
arch/x86/kernel/i387.c
arch/x86/kvm/mmu.c
arch/x86/net/bpf_jit_comp.c
arch/x86/pci/acpi.c
arch/xtensa/include/asm/dma-mapping.h
block/blk-mq.c
block/genhd.c
crypto/Kconfig
drivers/ata/ahci_mvebu.c
drivers/ata/pata_octeon_cf.c
drivers/base/cacheinfo.c
drivers/base/init.c
drivers/block/Kconfig
drivers/block/nvme-core.c
drivers/block/zram/zram_drv.c
drivers/bus/mips_cdmm.c
drivers/bus/mvebu-mbus.c
drivers/dma/at_xdmac.c
drivers/dma/dmaengine.c
drivers/dma/hsu/hsu.c
drivers/dma/pl330.c
drivers/firmware/iscsi_ibft.c
drivers/gpio/gpio-kempld.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/drm_plane_helper.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/dce3_1_afmt.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_audio.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/vgem/Makefile
drivers/gpu/drm/vgem/vgem_dma_buf.c [deleted file]
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/vgem/vgem_drv.h
drivers/hwmon/nct6683.c
drivers/hwmon/nct6775.c
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/tmp401.c
drivers/i2c/busses/i2c-hix5hd2.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/iio/adc/twl6030-gpadc.c
drivers/iio/imu/adis16400.h
drivers/iio/imu/adis16400_buffer.c
drivers/iio/imu/adis16400_core.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/input/mouse/alps.c
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_proto.h
drivers/iommu/amd_iommu_types.h
drivers/iommu/dmar.c
drivers/iommu/exynos-iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/iommu.c
drivers/iommu/iova.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/irq-mips-gic.c
drivers/irqchip/irq-sunxi-nmi.c
drivers/lguest/core.c
drivers/md/dm-mpath.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/mfd/da9052-core.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/brocade/bna/cna_fwimg.c
drivers/net/ethernet/cisco/enic/enic_ethtool.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/vnic_rq.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/core.h
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/phy/amd-xgbe-phy.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/dp83640.c
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/ntb/ntb_hw.c
drivers/of/base.c
drivers/of/dynamic.c
drivers/pci/setup-bus.c
drivers/phy/Kconfig
drivers/phy/phy-core.c
drivers/phy/phy-omap-usb2.c
drivers/phy/phy-rcar-gen2.c
drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/meson/pinctrl-meson.c
drivers/pinctrl/meson/pinctrl-meson8b.c
drivers/platform/x86/thinkpad_acpi.c
drivers/regulator/da9052-regulator.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/soc/mediatek/Kconfig
drivers/soc/mediatek/mtk-pmic-wrap.c
drivers/ssb/driver_chipcommon_pmu.c
drivers/ssb/driver_pcicore.c
drivers/staging/ozwpan/ozhcd.c
drivers/staging/ozwpan/ozusbif.h
drivers/staging/ozwpan/ozusbsvc1.c
drivers/staging/rtl8712/rtl8712_led.c
drivers/staging/rtl8712/rtl871x_cmd.c
drivers/staging/rtl8712/rtl871x_mlme.c
drivers/staging/rtl8712/rtl871x_pwrctrl.c
drivers/staging/rtl8712/rtl871x_sta_mgt.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/target_core_alua.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_internal.h
drivers/target/target_core_pr.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_pscsi.h
drivers/target/target_core_rd.c
drivers/target/target_core_sbc.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/target/target_core_xcopy.c
drivers/tty/mips_ejtag_fdc.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/imx.c
drivers/usb/dwc3/core.h
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_midi.c
drivers/usb/gadget/function/f_uac1.c
drivers/usb/gadget/legacy/g_ffs.c
drivers/usb/gadget/udc/s3c2410_udc.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_core.c
drivers/usb/phy/phy-ab8500-usb.c
drivers/usb/phy/phy-tahvo.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/vhost/scsi.c
drivers/video/backlight/pwm_bl.c
drivers/virtio/virtio_pci_common.c
fs/binfmt_elf.c
fs/cifs/cifs_dfs_ref.c
fs/cifs/cifs_unicode.c
fs/cifs/cifsfs.c
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/link.c
fs/cifs/readdir.c
fs/cifs/smb1ops.c
fs/cifs/smb2pdu.c
fs/dcache.c
fs/fhandle.c
fs/omfs/bitmap.c
fs/omfs/inode.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/super.c
fs/xfs/libxfs/xfs_attr_leaf.c
fs/xfs/libxfs/xfs_attr_leaf.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/xfs_attr_inactive.c
fs/xfs/xfs_file.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_mount.c
include/linux/backing-dev.h
include/linux/brcmphy.h
include/linux/cpumask.h
include/linux/intel-iommu.h
include/linux/iommu.h
include/linux/of.h
include/linux/percpu_counter.h
include/linux/perf_event.h
include/net/inet_connection_sock.h
include/net/mac80211.h
include/net/sctp/sctp.h
include/sound/hda_regmap.h
include/target/target_core_backend.h
include/target/target_core_configfs.h
include/target/target_core_fabric.h
include/trace/events/kmem.h
include/trace/events/writeback.h
include/uapi/linux/virtio_balloon.h
kernel/compat.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/locking/lockdep.c
kernel/locking/lockdep_proc.c
kernel/module.c
kernel/sched/fair.c
kernel/trace/ring_buffer_benchmark.c
lib/cpumask.c
lib/mpi/longlong.h
lib/percpu_counter.c
lib/rhashtable.c
lib/strnlen_user.c
lib/swiotlb.c
mm/backing-dev.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/zsmalloc.c
net/bridge/br_fdb.c
net/bridge/br_multicast.c
net/bridge/netfilter/ebtables.c
net/caif/caif_socket.c
net/core/dev.c
net/core/ethtool.c
net/core/skbuff.c
net/core/sock.c
net/dsa/dsa.c
net/ipv4/esp4.c
net/ipv4/ip_vti.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_minisocks.c
net/ipv4/udp.c
net/ipv6/addrconf_core.c
net/ipv6/esp6.c
net/ipv6/ip6_vti.c
net/ipv6/udp.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/key.h
net/mac80211/rx.c
net/mac80211/util.c
net/mpls/af_mpls.c
net/mpls/internal.h
net/openvswitch/vport-netdev.c
net/sched/sch_api.c
net/sctp/auth.c
net/tipc/socket.c
net/unix/af_unix.c
net/wireless/wext-compat.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_replay.c
net/xfrm/xfrm_state.c
scripts/checkpatch.pl
scripts/gdb/linux/modules.py
sound/hda/hdac_regmap.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_local.h
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/hda/patch_via.c
sound/pci/hda/thinkpad_helper.c
sound/usb/mixer.c
sound/usb/mixer_maps.c
sound/usb/quirks.c
tools/net/bpf_jit_disasm.c
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/entry_from_vm86.c [new file with mode: 0644]

index 99983e67c13c9f6aadff74c1969a4d27cede7d26..da95513571ea3e3e53263f6c91588fb58d50f3fb 100644 (file)
@@ -162,7 +162,7 @@ Description:        Discover CPUs in the same CPU frequency coordination domain
 What:          /sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1}
 Date:          August 2008
 KernelVersion: 2.6.27
-Contact:       discuss@x86-64.org
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:   Disable L3 cache indices
 
                These files exist in every CPU's cache/index3 directory. Each
index 4b641c7bf1c252a3465aa7e028e18042cb7ad61b..09089a6d69ed8d1c9b29115e6abce75a6d1a2fcd 100644 (file)
@@ -32,8 +32,8 @@ Example:
                touchscreen-fuzz-x = <4>;
                touchscreen-fuzz-y = <7>;
                touchscreen-fuzz-pressure = <2>;
-               touchscreen-max-x = <4096>;
-               touchscreen-max-y = <4096>;
+               touchscreen-size-x = <4096>;
+               touchscreen-size-y = <4096>;
                touchscreen-max-pressure = <2048>;
 
                ti,x-plate-ohms = <280>;
index dc2a18f0b3a10a9e1bd5814fc429fe9246b82ec7..ddbe304beb212238e859640905b83886e5164ac7 100644 (file)
@@ -15,10 +15,8 @@ Optional properties:
   - phys: phandle + phy specifier pair
   - phy-names: must be "usb"
   - dmas: Must contain a list of references to DMA specifiers.
-  - dma-names : Must contain a list of DMA names:
-   - tx0 ... tx<n>
-   - rx0 ... rx<n>
-    - This <n> means DnFIFO in USBHS module.
+  - dma-names : named "ch%d", where %d is the channel number ranging from zero
+                to the number of channels (DnFIFOs) minus one.
 
 Example:
        usbhs: usb@e6590000 {
index 8eb88e974055f62f2c9226bcd6d98b3e8e5ea908..711f75e189eba003e31a3f762fa16be04d1012d0 100644 (file)
@@ -20,7 +20,7 @@ Supported chips:
     Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html
   * Texas Instruments TMP435
     Prefix: 'tmp435'
-    Addresses scanned: I2C 0x37, 0x48 - 0x4f
+    Addresses scanned: I2C 0x48 - 0x4f
     Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html
 
 Authors:
index 61ab1628a057cc2c4d8b11d892d834f7e5f7773a..6726139bd2899038e77ae15f9901351773dd324b 100644 (file)
@@ -1481,6 +1481,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        By default, super page will be supported if Intel IOMMU
                        has the capability. With this option, super page will
                        not be supported.
+               ecs_off [Default Off]
+                       By default, extended context tables will be supported if
+                       the hardware advertises that it has support both for the
+                       extended tables themselves, and also PASID support. With
+                       this option set, extended tables will not be used even
+                       on hardware which claims to support them.
 
        intel_idle.max_cstate=  [KNL,HW,ACPI,X86]
                        0       disables intel_idle and fall back on acpi_idle.
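
A usage sketch, not part of the patch: assuming the comma-separated
option syntax that drivers/iommu/intel-iommu.c uses for intel_iommu=,
a kernel command line such as

        intel_iommu=on,ecs_off

would enable the Intel IOMMU while refusing to use extended context
tables even on hardware that advertises ECS and PASID support.
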
index d727a38291005f962848ed40a1ab11db4c167899..53a726855e49bfa4c313e46e15df1eec7cb610ae 100644 (file)
@@ -20,7 +20,7 @@
        files/UDP-Lite-HOWTO.txt
 
    o The Wireshark UDP-Lite WiKi (with capture files):
-       http://wiki.wireshark.org/Lightweight_User_Datagram_Protocol
+       https://wiki.wireshark.org/Lightweight_User_Datagram_Protocol
 
    o The Protocol Spec, RFC 3828, http://www.ietf.org/rfc/rfc3828.txt
 
index 43e94ea6d2cad8d0e17715a7ecf4d1e31b9b9e0e..263b907517ac2cd14e3b8472f4bc23f4aa8aae07 100644 (file)
@@ -15,8 +15,7 @@ Contents:
   a) Discovering and configuring TCMU uio devices
   b) Waiting for events on the device(s)
   c) Managing the command ring
-3) Command filtering and pass_level
-4) A final note
+3) A final note
 
 
 TCM Userspace Design
@@ -324,7 +323,7 @@ int handle_device_events(int fd, void *map)
   /* Process events from cmd ring until we catch up with cmd_head */
   while (ent != (void *)mb + mb->cmdr_off + mb->cmd_head) {
 
-    if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) {
+    if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
       uint8_t *cdb = (void *)mb + ent->req.cdb_off;
       bool success = true;
 
@@ -339,8 +338,12 @@ int handle_device_events(int fd, void *map)
         ent->rsp.scsi_status = SCSI_CHECK_CONDITION;
       }
     }
+    else if (tcmu_hdr_get_op(ent->hdr.len_op) != TCMU_OP_PAD) {
+      /* Tell the kernel we didn't handle unknown opcodes */
+      ent->hdr.uflags |= TCMU_UFLAG_UNKNOWN_OP;
+    }
     else {
-      /* Do nothing for PAD entries */
+      /* Do nothing for PAD entries except update cmd_tail */
     }
 
     /* update cmd_tail */
@@ -360,28 +363,6 @@ int handle_device_events(int fd, void *map)
 }
 
 
-Command filtering and pass_level
---------------------------------
-
-TCMU supports a "pass_level" option with valid values of 0 or 1.  When
-the value is 0 (the default), nearly all SCSI commands received for
-the device are passed through to the handler. This allows maximum
-flexibility but increases the amount of code required by the handler,
-to support all mandatory SCSI commands. If pass_level is set to 1,
-then only IO-related commands are presented, and the rest are handled
-by LIO's in-kernel command emulation. The commands presented at level
-1 include all versions of:
-
-READ
-WRITE
-WRITE_VERIFY
-XDWRITEREAD
-WRITE_SAME
-COMPARE_AND_WRITE
-SYNCHRONIZE_CACHE
-UNMAP
-
-
 A final note
 ------------
 
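A minimal sketch of the accessor used in the example code above,
assuming the len_op packing from include/uapi/linux/target_core_user.h,
where the low bits of the 32-bit len_op field carry the opcode (the
mask value here is an assumption and should be taken from the uapi
header):

        #include <stdint.h>

        #define TCMU_OP_MASK 0x7        /* assumed opcode mask */

        /* Extract the opcode from the packed len_op field; this is
         * why the example now passes ent->hdr.len_op rather than a
         * pointer to the whole header. */
        static inline uint32_t tcmu_hdr_get_op(uint32_t len_op)
        {
                return len_op & TCMU_OP_MASK;
        }
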
index 469d03b06b1f3589f9a89d1b8f41ecd7db024baa..82a8de8d618c7da2e49df55ffcf6b7fae18ff300 100644 (file)
@@ -51,9 +51,9 @@ trivial patch so apply some common sense.
        or does something very odd once a month document it.
 
        PLEASE remember that submissions must be made under the terms
-       of the OSDL certificate of contribution and should include a
-       Signed-off-by: line.  The current version of this "Developer's
-       Certificate of Origin" (DCO) is listed in the file
+       of the Linux Foundation certificate of contribution and should
+       include a Signed-off-by: line.  The current version of this
+       "Developer's Certificate of Origin" (DCO) is listed in the file
        Documentation/SubmittingPatches.
 
 6.     Make sure you have the right to send any changes you make. If you
@@ -2428,7 +2428,6 @@ L:        linux-security-module@vger.kernel.org
 S:     Supported
 F:     include/linux/capability.h
 F:     include/uapi/linux/capability.h
-F:     security/capability.c
 F:     security/commoncap.c
 F:     kernel/capability.c
 
@@ -7577,6 +7576,7 @@ F:        drivers/pci/host/pci-exynos.c
 
 PCI DRIVER FOR SYNOPSIS DESIGNWARE
 M:     Jingoo Han <jingoohan1@gmail.com>
+M:     Pratyush Anand <pratyush.anand@gmail.com>
 L:     linux-pci@vger.kernel.org
 S:     Maintained
 F:     drivers/pci/host/*designware*
@@ -7590,8 +7590,9 @@ F:        Documentation/devicetree/bindings/pci/host-generic-pci.txt
 F:     drivers/pci/host/pci-host-generic.c
 
 PCIE DRIVER FOR ST SPEAR13XX
+M:     Pratyush Anand <pratyush.anand@gmail.com>
 L:     linux-pci@vger.kernel.org
-S:     Orphan
+S:     Maintained
 F:     drivers/pci/host/*spear*
 
 PCMCIA SUBSYSTEM
@@ -10589,8 +10590,7 @@ F:      drivers/virtio/virtio_input.c
 F:     include/uapi/linux/virtio_input.h
 
 VIA RHINE NETWORK DRIVER
-M:     Roger Luethi <rl@hellgate.ch>
-S:     Maintained
+S:     Orphan
 F:     drivers/net/ethernet/via/via-rhine.c
 
 VIA SD/MMC CARD CONTROLLER DRIVER
index 92a70785920535c6125d9695743149d3627efbc2..3ba504496cb2deb13b50d39d01ec11641502e342 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc8
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
index cd143887380a26da88e8372828dc586ce3ff7b31..8399bd0e68e8e5cb7aba078cd40864d89b971eed 100644 (file)
@@ -14,6 +14,9 @@ targets               := vmlinux.gz vmlinux \
                   tools/bootpzh bootloader bootpheader bootpzheader 
 OBJSTRIP       := $(obj)/tools/objstrip
 
+HOSTCFLAGS     := -Wall -I$(objtree)/usr/include
+BOOTCFLAGS     += -I$(obj) -I$(srctree)/$(obj)
+
 # SRM bootable image.  Copy to offset 512 of a partition.
 $(obj)/bootimage: $(addprefix $(obj)/tools/,mkbb lxboot bootlx) $(obj)/vmlinux.nh
        ( cat $(obj)/tools/lxboot $(obj)/tools/bootlx $(obj)/vmlinux.nh ) > $@ 
@@ -96,13 +99,14 @@ $(obj)/tools/bootph: $(obj)/bootpheader $(OBJSTRIP) FORCE
 $(obj)/tools/bootpzh: $(obj)/bootpzheader $(OBJSTRIP) FORCE
        $(call if_changed,objstrip)
 
-LDFLAGS_bootloader   := -static -uvsprintf -T  #-N -relax
-LDFLAGS_bootpheader  := -static -uvsprintf -T  #-N -relax
-LDFLAGS_bootpzheader := -static -uvsprintf -T  #-N -relax
+LDFLAGS_bootloader   := -static -T # -N -relax
+LDFLAGS_bootloader   := -static -T # -N -relax
+LDFLAGS_bootpheader  := -static -T # -N -relax
+LDFLAGS_bootpzheader := -static -T # -N -relax
 
-OBJ_bootlx   := $(obj)/head.o $(obj)/main.o
-OBJ_bootph   := $(obj)/head.o $(obj)/bootp.o
-OBJ_bootpzh  := $(obj)/head.o $(obj)/bootpz.o $(obj)/misc.o
+OBJ_bootlx   := $(obj)/head.o $(obj)/stdio.o $(obj)/main.o
+OBJ_bootph   := $(obj)/head.o $(obj)/stdio.o $(obj)/bootp.o
+OBJ_bootpzh  := $(obj)/head.o $(obj)/stdio.o $(obj)/bootpz.o $(obj)/misc.o
 
 $(obj)/bootloader: $(obj)/bootloader.lds $(OBJ_bootlx) $(LIBS_Y) FORCE
        $(call if_changed,ld)
index 3baf2d1e908df5760f1304bab309ae70877eee03..dd6eb4a33582e63def4b015c0a1ad496889feacd 100644 (file)
@@ -19,7 +19,6 @@
 
 #include "ksize.h"
 
-extern int vsprintf(char *, const char *, va_list);
 extern unsigned long switch_to_osf_pal(unsigned long nr,
        struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
        unsigned long *vptb);
diff --git a/arch/alpha/boot/stdio.c b/arch/alpha/boot/stdio.c
new file mode 100644 (file)
index 0000000..f844dae
--- /dev/null
+++ b/arch/alpha/boot/stdio.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+
+size_t strnlen(const char * s, size_t count)
+{
+       const char *sc;
+
+       for (sc = s; count-- && *sc != '\0'; ++sc)
+               /* nothing */;
+       return sc - s;
+}
+
+# define do_div(n, base) ({                                            \
+       unsigned int __base = (base);                                   \
+       unsigned int __rem;                                             \
+       __rem = ((unsigned long long)(n)) % __base;                     \
+       (n) = ((unsigned long long)(n)) / __base;                       \
+       __rem;                                                          \
+})
+
+
+static int skip_atoi(const char **s)
+{
+       int i, c;
+
+       for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s)
+               i = i*10 + c - '0';
+       return i;
+}
+
+#define ZEROPAD        1               /* pad with zero */
+#define SIGN   2               /* unsigned/signed long */
+#define PLUS   4               /* show plus */
+#define SPACE  8               /* space if plus */
+#define LEFT   16              /* left justified */
+#define SPECIAL        32              /* 0x */
+#define LARGE  64              /* use 'ABCDEF' instead of 'abcdef' */
+
+static char * number(char * str, unsigned long long num, int base, int size, int precision, int type)
+{
+       char c,sign,tmp[66];
+       const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
+       int i;
+
+       if (type & LARGE)
+               digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+       if (type & LEFT)
+               type &= ~ZEROPAD;
+       if (base < 2 || base > 36)
+               return 0;
+       c = (type & ZEROPAD) ? '0' : ' ';
+       sign = 0;
+       if (type & SIGN) {
+               if ((signed long long)num < 0) {
+                       sign = '-';
+                       num = - (signed long long)num;
+                       size--;
+               } else if (type & PLUS) {
+                       sign = '+';
+                       size--;
+               } else if (type & SPACE) {
+                       sign = ' ';
+                       size--;
+               }
+       }
+       if (type & SPECIAL) {
+               if (base == 16)
+                       size -= 2;
+               else if (base == 8)
+                       size--;
+       }
+       i = 0;
+       if (num == 0)
+               tmp[i++]='0';
+       else while (num != 0) {
+               tmp[i++] = digits[do_div(num, base)];
+       }
+       if (i > precision)
+               precision = i;
+       size -= precision;
+       if (!(type&(ZEROPAD+LEFT)))
+               while(size-->0)
+                       *str++ = ' ';
+       if (sign)
+               *str++ = sign;
+       if (type & SPECIAL) {
+               if (base==8)
+                       *str++ = '0';
+               else if (base==16) {
+                       *str++ = '0';
+                       *str++ = digits[33];
+               }
+       }
+       if (!(type & LEFT))
+               while (size-- > 0)
+                       *str++ = c;
+       while (i < precision--)
+               *str++ = '0';
+       while (i-- > 0)
+               *str++ = tmp[i];
+       while (size-- > 0)
+               *str++ = ' ';
+       return str;
+}
+
+int vsprintf(char *buf, const char *fmt, va_list args)
+{
+       int len;
+       unsigned long long num;
+       int i, base;
+       char * str;
+       const char *s;
+
+       int flags;              /* flags to number() */
+
+       int field_width;        /* width of output field */
+       int precision;          /* min. # of digits for integers; max
+                                  number of chars for from string */
+       int qualifier;          /* 'h', 'l', or 'L' for integer fields */
+                               /* 'z' support added 23/7/1999 S.H.    */
+                               /* 'z' changed to 'Z' --davidm 1/25/99 */
+
+
+       for (str=buf ; *fmt ; ++fmt) {
+               if (*fmt != '%') {
+                       *str++ = *fmt;
+                       continue;
+               }
+
+               /* process flags */
+               flags = 0;
+               repeat:
+                       ++fmt;          /* this also skips first '%' */
+                       switch (*fmt) {
+                               case '-': flags |= LEFT; goto repeat;
+                               case '+': flags |= PLUS; goto repeat;
+                               case ' ': flags |= SPACE; goto repeat;
+                               case '#': flags |= SPECIAL; goto repeat;
+                               case '0': flags |= ZEROPAD; goto repeat;
+                               }
+
+               /* get field width */
+               field_width = -1;
+               if ('0' <= *fmt && *fmt <= '9')
+                       field_width = skip_atoi(&fmt);
+               else if (*fmt == '*') {
+                       ++fmt;
+                       /* it's the next argument */
+                       field_width = va_arg(args, int);
+                       if (field_width < 0) {
+                               field_width = -field_width;
+                               flags |= LEFT;
+                       }
+               }
+
+               /* get the precision */
+               precision = -1;
+               if (*fmt == '.') {
+                       ++fmt;
+                       if ('0' <= *fmt && *fmt <= '9')
+                               precision = skip_atoi(&fmt);
+                       else if (*fmt == '*') {
+                               ++fmt;
+                               /* it's the next argument */
+                               precision = va_arg(args, int);
+                       }
+                       if (precision < 0)
+                               precision = 0;
+               }
+
+               /* get the conversion qualifier */
+               qualifier = -1;
+               if (*fmt == 'l' && *(fmt + 1) == 'l') {
+                       qualifier = 'q';
+                       fmt += 2;
+               } else if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L'
+                       || *fmt == 'Z') {
+                       qualifier = *fmt;
+                       ++fmt;
+               }
+
+               /* default base */
+               base = 10;
+
+               switch (*fmt) {
+               case 'c':
+                       if (!(flags & LEFT))
+                               while (--field_width > 0)
+                                       *str++ = ' ';
+                       *str++ = (unsigned char) va_arg(args, int);
+                       while (--field_width > 0)
+                               *str++ = ' ';
+                       continue;
+
+               case 's':
+                       s = va_arg(args, char *);
+                       if (!s)
+                               s = "<NULL>";
+
+                       len = strnlen(s, precision);
+
+                       if (!(flags & LEFT))
+                               while (len < field_width--)
+                                       *str++ = ' ';
+                       for (i = 0; i < len; ++i)
+                               *str++ = *s++;
+                       while (len < field_width--)
+                               *str++ = ' ';
+                       continue;
+
+               case 'p':
+                       if (field_width == -1) {
+                               field_width = 2*sizeof(void *);
+                               flags |= ZEROPAD;
+                       }
+                       str = number(str,
+                               (unsigned long) va_arg(args, void *), 16,
+                               field_width, precision, flags);
+                       continue;
+
+
+               case 'n':
+                       if (qualifier == 'l') {
+                               long * ip = va_arg(args, long *);
+                               *ip = (str - buf);
+                       } else if (qualifier == 'Z') {
+                               size_t * ip = va_arg(args, size_t *);
+                               *ip = (str - buf);
+                       } else {
+                               int * ip = va_arg(args, int *);
+                               *ip = (str - buf);
+                       }
+                       continue;
+
+               case '%':
+                       *str++ = '%';
+                       continue;
+
+               /* integer number formats - set up the flags and "break" */
+               case 'o':
+                       base = 8;
+                       break;
+
+               case 'X':
+                       flags |= LARGE;
+               case 'x':
+                       base = 16;
+                       break;
+
+               case 'd':
+               case 'i':
+                       flags |= SIGN;
+               case 'u':
+                       break;
+
+               default:
+                       *str++ = '%';
+                       if (*fmt)
+                               *str++ = *fmt;
+                       else
+                               --fmt;
+                       continue;
+               }
+               if (qualifier == 'l') {
+                       num = va_arg(args, unsigned long);
+                       if (flags & SIGN)
+                               num = (signed long) num;
+               } else if (qualifier == 'q') {
+                       num = va_arg(args, unsigned long long);
+                       if (flags & SIGN)
+                               num = (signed long long) num;
+               } else if (qualifier == 'Z') {
+                       num = va_arg(args, size_t);
+               } else if (qualifier == 'h') {
+                       num = (unsigned short) va_arg(args, int);
+                       if (flags & SIGN)
+                               num = (signed short) num;
+               } else {
+                       num = va_arg(args, unsigned int);
+                       if (flags & SIGN)
+                               num = (signed int) num;
+               }
+               str = number(str, num, base, field_width, precision, flags);
+       }
+       *str = '\0';
+       return str-buf;
+}
+
+int sprintf(char * buf, const char *fmt, ...)
+{
+       va_list args;
+       int i;
+
+       va_start(args, fmt);
+       i=vsprintf(buf,fmt,args);
+       va_end(args);
+       return i;
+}
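
A usage sketch for the boot-local sprintf added above (hypothetical
caller; note these routines do no bounds checking, so the caller must
size the buffer generously):

        extern int sprintf(char *buf, const char *fmt, ...);

        static void report_size(unsigned long nbytes)
        {
                char buf[80];   /* large enough for the format below */

                /* return value is the number of characters written */
                sprintf(buf, "kernel is %lu bytes\n", nbytes);
        }
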
index 367d53d031fc04d51af471273a0256a5a08432c7..dee82695f48bad69b0d9cf81457196e321d2ff55 100644 (file)
@@ -27,6 +27,9 @@
 #include <linux/param.h>
 #ifdef __ELF__
 # include <linux/elf.h>
+# define elfhdr elf64_hdr
+# define elf_phdr elf64_phdr
+# define elf_check_arch(x) ((x)->e_machine == EM_ALPHA)
 #endif
 
 /* bootfile size must be multiple of BLOCK_SIZE: */
index f61e1a56c3787bcbd4a2ab093d1c58c7715fa6c4..4cb4b6d3452c0b3439c3aa3c0f928f74de09fb3a 100644 (file)
@@ -2,6 +2,5 @@
 #define _ALPHA_TYPES_H
 
 #include <asm-generic/int-ll64.h>
-#include <uapi/asm/types.h>
 
 #endif /* _ALPHA_TYPES_H */
index c509d306db4561ea65a40703b42e7f9bd078d352..a56e608db2f9e4aad716b96669de02c7571dc1df 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <uapi/asm/unistd.h>
 
-#define NR_SYSCALLS                    511
+#define NR_SYSCALLS                    514
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
index d214a0358100b6ad82a63fce68bc6016eb9ddaa4..aa33bf5aacb6c1666203e38700939750c90cb5c5 100644 (file)
 #define __NR_sched_setattr             508
 #define __NR_sched_getattr             509
 #define __NR_renameat2                 510
+#define __NR_getrandom                 511
+#define __NR_memfd_create              512
+#define __NR_execveat                  513
 
 #endif /* _UAPI_ALPHA_UNISTD_H */
index 253cf1a87481e815ad9a724dde1fef51b5616d09..51267ac5729b9c7276a0e838357bfb8ffd29e7db 100644 (file)
@@ -6,7 +6,6 @@
  *     Error handling code supporting Alpha systems
  */
 
-#include <linux/init.h>
 #include <linux/sched.h>
 
 #include <asm/io.h>
index 7b2be251c30fb92981d4aef8cf4f8951bed24728..51f2c8654253f2bd6667ccff24c0db09a7f80ccc 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/ptrace.h>
 #include <linux/interrupt.h>
 #include <linux/random.h>
-#include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
index e51f578636a5718d4f0e438b90b4b78a12b6b7da..36dc91ace83ae97069df82f5e3923a24275c6a9b 100644 (file)
@@ -1019,14 +1019,13 @@ SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
        if (tv) {
                if (get_tv32((struct timeval *)&kts, tv))
                        return -EFAULT;
+               kts.tv_nsec *= 1000;
        }
        if (tz) {
                if (copy_from_user(&ktz, tz, sizeof(*tz)))
                        return -EFAULT;
        }
 
-       kts.tv_nsec *= 1000;
-
        return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
 }
 
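The hunk above narrows the microsecond-to-nanosecond conversion to the
branch where kts is actually initialized; a simplified illustration of
the bug being fixed (names abbreviated):

        struct timespec kts;            /* uninitialized storage */

        if (tv) {
                /* kts is filled from the user timeval here ... */
                kts.tv_nsec *= 1000;    /* usec -> nsec, now valid */
        }
        /* Before the fix, the multiplication sat here unconditionally,
         * reading uninitialized memory whenever userspace passed a
         * NULL tv to set only the timezone. */
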
index 1941a07b5811f925aed82e853aab4efb081f74ca..84d13263ce46f193ef0b223466cea2f522ca109d 100644 (file)
@@ -236,12 +236,11 @@ release_thread(struct task_struct *dead_task)
 }
 
 /*
- * Copy an alpha thread..
+ * Copy architecture-specific thread state
  */
-
 int
 copy_thread(unsigned long clone_flags, unsigned long usp,
-           unsigned long arg,
+           unsigned long kthread_arg,
            struct task_struct *p)
 {
        extern void ret_from_fork(void);
@@ -262,7 +261,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
                        sizeof(struct switch_stack) + sizeof(struct pt_regs));
                childstack->r26 = (unsigned long) ret_from_kernel_thread;
                childstack->r9 = usp;   /* function */
-               childstack->r10 = arg;
+               childstack->r10 = kthread_arg;
                childregs->hae = alpha_mv.hae_cache,
                childti->pcb.usp = 0;
                return 0;
index 99ac36d5de4efd10832804e82509e062606720e2..2f24447fef92071b0ba9b94d09f8ed1fdc25d2d1 100644 (file)
@@ -63,7 +63,6 @@ static struct {
 enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
-       IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
 };
 
@@ -506,7 +505,6 @@ setup_profiling_timer(unsigned int multiplier)
        return -EINVAL;
 }
 
-\f
 static void
 send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
 {
@@ -552,10 +550,6 @@ handle_ipi(struct pt_regs *regs)
                        generic_smp_call_function_interrupt();
                        break;
 
-               case IPI_CALL_FUNC_SINGLE:
-                       generic_smp_call_function_single_interrupt();
-                       break;
-
                case IPI_CPU_STOP:
                        halt();
 
@@ -606,7 +600,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-       send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+       send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
 static void
index 6f01d9ad7b814700d8bd56094b13d3af3474cc11..72b59511e59aa350cc58d568cf896edba4f53602 100644 (file)
@@ -237,8 +237,7 @@ srmcons_init(void)
 
        return -ENODEV;
 }
-
-module_init(srmcons_init);
+device_initcall(srmcons_init);
 
 \f
 /*
index f21d61fab6787331d21571958185b637fc601bb7..24e41bd7d3c99060a7411c1c5774941249c89d72 100644 (file)
@@ -331,7 +331,7 @@ marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin)
        pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
        irq = intline;
 
-       msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI);
+       msi_loc = dev->msi_cap;
        msg_ctl = 0;
        if (msi_loc) 
                pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl);
index 24789713f1eafb4757ec1084c32225ce88bf4ad4..9b62e3fd4f038a925657beb15de3de89f8548473 100644 (file)
@@ -529,6 +529,9 @@ sys_call_table:
        .quad sys_sched_setattr
        .quad sys_sched_getattr
        .quad sys_renameat2                     /* 510 */
+       .quad sys_getrandom
+       .quad sys_memfd_create
+       .quad sys_execveat
 
        .size sys_call_table, . - sys_call_table
        .type sys_call_table, @object
index 9c4c189eb22f5a9db2d2ae678756a5241b3e1ee5..74aceead06e98a391a1f0fc49f5486ef2562844c 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/tty.h>
 #include <linux/delay.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/kallsyms.h>
 #include <linux/ratelimit.h>
 
index 18aa9b4f94f1822be3e01ea0906fd2cf234c1205..086a0d5445c528b631cec10fd48c5643a4101f86 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
index c32f8a0ad92543a0d6e6767e698f51da0972c17e..c300f5ef3482b82330d41c0b4d318362d76d4092 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
index 1c84cc257fc7ec7a6c3df970722b381f0ed17ff3..02edf59716144e0939eb2933cfb303fa457ecbd7 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
index 34a57a12655377727930f8abba88082f3afde149..adb1744d20f3845efb48a314e56784a5c5470a0a 100644 (file)
@@ -9,7 +9,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
index 86217db2937ab331666b710c1bd639c68caad516..992736b5229ba7bd06497feb35ecff5fc36ab232 100644 (file)
@@ -223,7 +223,7 @@ dtb-$(CONFIG_SOC_IMX25) += \
        imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \
        imx25-karo-tx25.dtb \
        imx25-pdk.dtb
-dtb-$(CONFIG_SOC_IMX31) += \
+dtb-$(CONFIG_SOC_IMX27) += \
        imx27-apf27.dtb \
        imx27-apf27dev.dtb \
        imx27-eukrea-mbimxsd27-baseboard.dtb \
index c3255e0c90aa829fc792f02d1265d413f3c6e624..dbb3f4d2bf84ebf4565555949053c94619ea161d 100644 (file)
 /include/ "tps65217.dtsi"
 
 &tps {
+       /*
+        * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only
+        * mode") at poweroff.  Most BeagleBone versions do not support RTC-only
+        * mode and risk hardware damage if this mode is entered.
+        *
+        * For details, see linux-omap mailing list May 2015 thread
+        *      [PATCH] ARM: dts: am335x-bone* enable pmic-shutdown-controller
+        * In particular, messages:
+        *      http://www.spinics.net/lists/linux-omap/msg118585.html
+        *      http://www.spinics.net/lists/linux-omap/msg118615.html
+        *
+        * You can override this later with
+        *      &tps {  /delete-property/ ti,pmic-shutdown-controller;  }
+        * if you want to use RTC-only mode and made sure you are not affected
+        * by the hardware problems. (Tip: double-check by performing a current
+        * measurement after shutdown: it should be less than 1 mA.)
+        */
+       ti,pmic-shutdown-controller;
+
        regulators {
                dcdc1_reg: regulator@0 {
                        regulator-name = "vdds_dpr";
index 5c42d259fa68fbf29c98439306badd1d44987ee0..901739fcb85a37abba32822463caea432637d274 100644 (file)
@@ -80,7 +80,3 @@
                status = "okay";
        };
 };
-
-&rtc {
-       system-power-controller;
-};
index 87fc7a35e80261cad03261fd9cf8b047606fc286..156d05efcb70bf5af737cf67892691d41b84fb2b 100644 (file)
        wlcore: wlcore@2 {
                compatible = "ti,wl1271";
                reg = <2>;
-               interrupt-parent = <&gpio1>;
+               interrupt-parent = <&gpio0>;
                interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; /* gpio 31 */
                ref-clock-frequency = <38400000>;
        };
index 518b8fde88b0c87005fe68e413cfee769befac07..18cc826e9db534714a1b4d8a3cfc497e43ffcc85 100644 (file)
@@ -12,7 +12,7 @@
                #clock-cells = <0>;
                compatible = "ti,am35xx-gate-clock";
                clocks = <&ipss_ick>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <1>;
        };
 
@@ -20,7 +20,7 @@
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
                clocks = <&rmii_ck>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <9>;
        };
 
@@ -28,7 +28,7 @@
                #clock-cells = <0>;
                compatible = "ti,am35xx-gate-clock";
                clocks = <&ipss_ick>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <2>;
        };
 
@@ -36,7 +36,7 @@
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
                clocks = <&pclk_ck>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <10>;
        };
 
@@ -44,7 +44,7 @@
                #clock-cells = <0>;
                compatible = "ti,am35xx-gate-clock";
                clocks = <&ipss_ick>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <0>;
        };
 
@@ -52,7 +52,7 @@
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
                clocks = <&sys_ck>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <8>;
        };
 
@@ -60,7 +60,7 @@
                #clock-cells = <0>;
                compatible = "ti,am35xx-gate-clock";
                clocks = <&sys_ck>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <3>;
        };
 };
index a2cf2154dcdb68d8374c2bea4b136fccaccb7aa2..fdd187c55aa5f78b5ab61d15dc12c1ad001990d2 100644 (file)
 
                internal-regs {
 
+                       rtc@10300 {
+                               /* No crystal connected to the internal RTC */
+                               status = "disabled";
+                       };
+
                        /* J10: VCC, NC, RX, NC, TX, GND  */
                        serial@12000 {
                                status = "okay";
index de8427be830a32e24a01ace97f11303435528b7b..289806adb343806aefce22e63b6caa1d558741fb 100644 (file)
                        ti,hwmods = "usb_otg_hs";
 
                        usb0: usb@47401000 {
-                               compatible = "ti,musb-am33xx";
+                               compatible = "ti,musb-dm816";
                                reg = <0x47401400 0x400
                                       0x47401000 0x200>;
                                reg-names = "mc", "control";
                        };
 
                        usb1: usb@47401800 {
-                               compatible = "ti,musb-am33xx";
+                               compatible = "ti,musb-dm816";
                                reg = <0x47401c00 0x400
                                       0x47401800 0x200>;
                                reg-names = "mc", "control";
index 173ffa479ad3cb03eb6e6742663fafaccacf9d53..792394dd0f2ab3ebf347af1e3f08d7ad330ee997 100644 (file)
 
                        display-timings {
                                timing-0 {
-                                       clock-frequency = <0>;
+                                       clock-frequency = <57153600>;
                                        hactive = <720>;
                                        vactive = <1280>;
                                        hfront-porch = <5>;
index 6951b66d1ab7b4cbe37dcf8944a7626213979827..bc215e4b75fd52c6e5b2e271b4a9e6265d442205 100644 (file)
 
                        fec: ethernet@1002b000 {
                                compatible = "fsl,imx27-fec";
-                               reg = <0x1002b000 0x4000>;
+                               reg = <0x1002b000 0x1000>;
                                interrupts = <50>;
                                clocks = <&clks IMX27_CLK_FEC_IPG_GATE>,
                                         <&clks IMX27_CLK_FEC_AHB_GATE>;
index 134d3f27a8ec5ae1f4f68982da4047dfcff13e13..921de6605f075d878f407d925a5652195bd2b41d 100644 (file)
        nand@0,0 {
                reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
                nand-bus-width = <16>;
+               gpmc,device-width = <2>;
+               ti,nand-ecc-opt = "sw";
 
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
index 5c16145920eafd9604f0571dfe60a23986313871..5f5e0f3d5b64fcb2283f72b9df923c793be7f75c 100644 (file)
                touchscreen-fuzz-x = <4>;
                touchscreen-fuzz-y = <7>;
                touchscreen-fuzz-pressure = <2>;
-               touchscreen-max-x = <4096>;
-               touchscreen-max-y = <4096>;
+               touchscreen-size-x = <4096>;
+               touchscreen-size-y = <4096>;
                touchscreen-max-pressure = <2048>;
 
                ti,x-plate-ohms = <280>;
index 0ca4a3eaf65d02751d08c774c1b73ff21092c408..fbbb1915c6a95a81ac3edc58a6725f96c3c8b890 100644 (file)
@@ -429,7 +429,7 @@ CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_EHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD_STI=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_ISP1760=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_STI=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
index f8ccc21fa032354facead9735abf9f4eb0cb7eb2..4e7f40c577e6e4fe9df3dd677b0d146b1ceb13c2 100644 (file)
@@ -33,7 +33,9 @@ ret_fast_syscall:
  UNWIND(.fnstart       )
  UNWIND(.cantunwind    )
        disable_irq                             @ disable interrupts
-       ldr     r1, [tsk, #TI_FLAGS]
+       ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
+       tst     r1, #_TIF_SYSCALL_WORK
+       bne     __sys_trace_return
        tst     r1, #_TIF_WORK_MASK
        bne     fast_work_pending
        asm_trace_hardirqs_on
index 213919ba326fbad35bd356b19bc0c0546cbc70b1..3b8c2833c5379aa36ca3a0a384bb740df3f3284a 100644 (file)
@@ -304,16 +304,17 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 static int of_pmu_irq_cfg(struct platform_device *pdev)
 {
        int i, irq;
-       int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
-
-       if (!irqs)
-               return -ENOMEM;
+       int *irqs;
 
        /* Don't bother with PPIs; they're already affine */
        irq = platform_get_irq(pdev, 0);
        if (irq >= 0 && irq_is_percpu(irq))
                return 0;
 
+       irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+       if (!irqs)
+               return -ENOMEM;
+
        for (i = 0; i < pdev->num_resources; ++i) {
                struct device_node *dn;
                int cpu;
index c0b6dccbf7bd5d8d14c05172d11d6ee690bf8fef..7d23ce04cad5201919a58aefccafc88e882dd844 100644 (file)
@@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3;
 static u32 exynos_irqwake_intmask = 0xffffffff;
 
 static const struct exynos_wkup_irq exynos3250_wkup_irq[] = {
-       { 105, BIT(1) }, /* RTC alarm */
-       { 106, BIT(2) }, /* RTC tick */
+       { 73, BIT(1) }, /* RTC alarm */
+       { 74, BIT(2) }, /* RTC tick */
        { /* sentinel */ },
 };
 
index 4d60005e9277ce8f33307f411dd5c0baa8a6ac7a..6d0893a3828eb6b57322ddd0e0df26ac7e32bd85 100644 (file)
@@ -280,9 +280,15 @@ void __init imx_gpc_check_dt(void)
        struct device_node *np;
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
-       if (WARN_ON(!np ||
-                   !of_find_property(np, "interrupt-controller", NULL)))
-               pr_warn("Outdated DT detected, system is about to crash!!!\n");
+       if (WARN_ON(!np))
+               return;
+
+       if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
+               pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
+
+               /* map GPC, so that at least CPUidle and WARs keep working */
+               gpc_base = of_iomap(np, 0);
+       }
 }
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -443,6 +449,10 @@ static int imx_gpc_probe(struct platform_device *pdev)
        struct regulator *pu_reg;
        int ret;
 
+       /* bail out if DT too old and doesn't provide the necessary info */
+       if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells"))
+               return 0;
+
        pu_reg = devm_regulator_get_optional(&pdev->dev, "pu");
        if (PTR_ERR(pu_reg) == -ENODEV)
                pu_reg = NULL;
index d1dedc8195ed2569508e0d522301bdf535aefda8..eafd120b53f1bc15c82f2cc47dc8033e31ca566e 100644 (file)
@@ -203,23 +203,8 @@ save_context_wfi:
         */
        ldr     r1, kernel_flush
        blx     r1
-       /*
-        * The kernel doesn't interwork: v7_flush_dcache_all in particluar will
-        * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
-        * This sequence switches back to ARM.  Note that .align may insert a
-        * nop: bx pc needs to be word-aligned in order to work.
-        */
- THUMB(        .thumb          )
- THUMB(        .align          )
- THUMB(        bx      pc      )
- THUMB(        nop             )
-       .arm
-
        b       omap3_do_wfi
-
-/*
- * Local variables
- */
+ENDPROC(omap34xx_cpu_suspend)
 omap3_do_wfi_sram_addr:
        .word omap3_do_wfi_sram
 kernel_flush:
@@ -364,10 +349,7 @@ exit_nonoff_modes:
  * ===================================
  */
        ldmfd   sp!, {r4 - r11, pc}     @ restore regs and return
-
-/*
- * Local variables
- */
+ENDPROC(omap3_do_wfi)
 sdrc_power:
        .word   SDRC_POWER_V
 cm_idlest1_core:
index f1aeb54fabe36be6ef28cd48bcb1645efdbf16df..2385052b0ce1326d9f1ae79959d16a9fea459361 100644 (file)
@@ -107,7 +107,7 @@ static int cplds_probe(struct platform_device *pdev)
        struct resource *res;
        struct cplds *fpga;
        int ret;
-       unsigned int base_irq = 0;
+       int base_irq;
        unsigned long irqflags = 0;
 
        fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL);
index 4e6ef896c6195db73f770957e9df619a0be05e06..7186382672b5eec605cba5ff491a7019914d304b 100644 (file)
@@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void)
                        }
 
                        /*
-                        * Find the first non-section-aligned page, and point
+                        * Find the first non-pmd-aligned page, and point
                         * memblock_limit at it. This relies on rounding the
-                        * limit down to be section-aligned, which happens at
-                        * the end of this function.
+                        * limit down to be pmd-aligned, which happens at the
+                        * end of this function.
                         *
                         * With this algorithm, the start or end of almost any
-                        * bank can be non-section-aligned. The only exception
-                        * is that the start of the bank 0 must be section-
+                        * bank can be non-pmd-aligned. The only exception is
+                        * that the start of the bank 0 must be section-
                         * aligned, since otherwise memory would need to be
                         * allocated when mapping the start of bank 0, which
                         * occurs before any free memory is mapped.
                         */
                        if (!memblock_limit) {
-                               if (!IS_ALIGNED(block_start, SECTION_SIZE))
+                               if (!IS_ALIGNED(block_start, PMD_SIZE))
                                        memblock_limit = block_start;
-                               else if (!IS_ALIGNED(block_end, SECTION_SIZE))
+                               else if (!IS_ALIGNED(block_end, PMD_SIZE))
                                        memblock_limit = arm_lowmem_limit;
                        }
 
@@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void)
        high_memory = __va(arm_lowmem_limit - 1) + 1;
 
        /*
-        * Round the memblock limit down to a section size.  This
+        * Round the memblock limit down to a pmd size.  This
         * helps to ensure that we will allocate memory from the
-        * last full section, which should be mapped.
+        * last full pmd, which should be mapped.
         */
        if (memblock_limit)
-               memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+               memblock_limit = round_down(memblock_limit, PMD_SIZE);
        if (!memblock_limit)
                memblock_limit = arm_lowmem_limit;
 
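Rounding down to a power-of-two boundary is a mask operation; a worked
sketch, assuming the 2 MiB PMD_SIZE of a classic (non-LPAE) ARM
configuration:

        #define round_down(x, y)  ((x) & ~((y) - 1))   /* y: power of 2 */

        /* 0x2f4a1000 & ~(0x200000 - 1) == 0x2f400000, i.e. the limit
         * drops back to the last fully-mapped 2 MiB pmd boundary. */
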
index 43d54017b779d4e211462b8bebe2604025bb08ea..d0ab012fa379eb97c6e43ebad83ee18185d2b598 100644 (file)
@@ -16,7 +16,8 @@
 #include "mt8173.dtsi"
 
 / {
-       model = "mediatek,mt8173-evb";
+       model = "MediaTek MT8173 evaluation board";
+       compatible = "mediatek,mt8173-evb", "mediatek,mt8173";
 
        aliases {
                serial0 = &uart0;
index 4e8ad0523118d631ea24f6b9f8fd1c3ffb123194..6abebe82d4e93ed0329f271cd54e2af5c1bc38d2 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/byteorder.h>
+#include <asm/def_LPBlackfin.h>
 
 #define __raw_readb bfin_read8
 #define __raw_readw bfin_read16
index 15051e9c2c6f98f3f2e8743739f10b63f795be3a..b054c5c6e7137cf85ba00f8c60fa33b8719b0c07 100644 (file)
@@ -127,7 +127,7 @@ int smp_num_siblings = 1;
 volatile int ia64_cpu_to_sapicid[NR_CPUS];
 EXPORT_SYMBOL(ia64_cpu_to_sapicid);
 
-static volatile cpumask_t cpu_callin_map;
+static cpumask_t cpu_callin_map;
 
 struct smp_boot_data smp_boot_data __initdata;
 
@@ -477,6 +477,7 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
        for (timeout = 0; timeout < 100000; timeout++) {
                if (cpumask_test_cpu(cpu, &cpu_callin_map))
                        break;  /* It has booted */
+               barrier(); /* Make sure we re-read cpu_callin_map */
                udelay(100);
        }
        Dprintk("\n");
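
The volatile qualifier is dropped above because the cpumask helpers take non-volatile pointers; the explicit barrier() then keeps the compiler from hoisting the load out of the polling loop. A minimal sketch of the same pattern, with barrier() spelled as the usual GCC compiler barrier and a made-up flag standing in for cpu_callin_map:

#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static int cpu_booted;          /* hypothetical flag set by another CPU */

static int wait_for_cpu(void)
{
	int timeout;

	for (timeout = 0; timeout < 100000; timeout++) {
		if (cpu_booted)
			return 0;       /* it has booted */
		barrier();              /* force cpu_booted to be re-read */
		/* udelay(100) would go here in kernel context */
	}
	return -1;
}

int main(void)
{
	cpu_booted = 1;
	printf("%d\n", wait_for_cpu());
	return 0;
}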
index d4e162d35b3467b9d7814769129237583f8cb100..7cc3be9fa7c65a0dd700dfd30c04cc7be822922d 100644 (file)
@@ -478,9 +478,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 
 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
-       struct pci_controller *controller = bridge->bus->sysdata;
-
-       ACPI_COMPANION_SET(&bridge->dev, controller->companion);
+       /*
+        * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
+        * here, pci_create_root_bus() has been called by someone else and
+        * sysdata is likely to be different from what we expect.  Let it go in
+        * that case.
+        */
+       if (!bridge->dev.parent) {
+               struct pci_controller *controller = bridge->bus->sysdata;
+               ACPI_COMPANION_SET(&bridge->dev, controller->companion);
+       }
        return 0;
 }
 
index e1fe6305113612cf2943bff9307a9579a9da3f69..597899ad5438e3b551a0b190555546e3a0e5b218 100644 (file)
@@ -1,6 +1,7 @@
 /*
  *  Atheros AR71XX/AR724X/AR913X specific prom routines
  *
+ *  Copyright (C) 2015 Laurent Fasnacht <l@libres.ch>
  *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
@@ -25,12 +26,14 @@ void __init prom_init(void)
 {
        fw_init_cmdline();
 
+#ifdef CONFIG_BLK_DEV_INITRD
        /* Read the initrd address from the firmware environment */
        initrd_start = fw_getenvl("initrd_start");
        if (initrd_start) {
                initrd_start = KSEG0ADDR(initrd_start);
                initrd_end = initrd_start + fw_getenvl("initrd_size");
        }
+#endif
 }
 
 void __init prom_free_prom_memory(void)
index a73c93c3d44a1069149945cf2732aeed26c918bd..7fc8397d16f21d713ad3e073308f69c75dd88692 100644 (file)
@@ -225,7 +225,7 @@ void __init plat_time_init(void)
        ddr_clk_rate = ath79_get_sys_clk_rate("ddr");
        ref_clk_rate = ath79_get_sys_clk_rate("ref");
 
-       pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz",
+       pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz\n",
                cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000,
                ddr_clk_rate / 1000000, (ddr_clk_rate / 1000) % 1000,
                ahb_clk_rate / 1000000, (ahb_clk_rate / 1000) % 1000,
index 558e94977942033dc8247bcc510ebb705aa9698a..68f0c5871adcdf51f40380ffbba09b1e5e52202c 100644 (file)
@@ -2,7 +2,6 @@
 # Makefile for the Cobalt micro systems family specific parts of the kernel
 #
 
-obj-y := buttons.o irq.o lcd.o led.o reset.o rtc.o serial.o setup.o time.o
+obj-y := buttons.o irq.o lcd.o led.o mtd.o reset.o rtc.o serial.o setup.o time.o
 
 obj-$(CONFIG_PCI)              += pci.o
-obj-$(CONFIG_MTD_PHYSMAP)      += mtd.o
index 002680648dcb22307f338f2db93af43c372a01ff..b2a577ebce0b08f79650d8ef2bb096fcb6d0a27e 100644 (file)
@@ -194,7 +194,7 @@ CONFIG_USB_WUSB_CBAF=m
 CONFIG_USB_C67X00_HCD=m
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_ISP1760_HCD=m
+CONFIG_USB_ISP1760=m
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_UHCI_HCD=m
 CONFIG_USB_R8A66597_HCD=m
index 18ae5ddef118c071e1240486e90f08be3e4b0871..c28a8499aec7f4fa18c5bd4c71922812d2ebf143 100644 (file)
 #define _PAGE_PRESENT_SHIFT    0
 #define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
 /* R2 or later cores check for RI/XI support to determine _PAGE_READ */
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 #define _PAGE_WRITE_SHIFT      (_PAGE_PRESENT_SHIFT + 1)
 #define _PAGE_WRITE            (1 << _PAGE_WRITE_SHIFT)
 #else
 #define _PAGE_SPLITTING                (1 << _PAGE_SPLITTING_SHIFT)
 
 /* Only R2 or newer cores have the XI bit */
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 #define _PAGE_NO_EXEC_SHIFT    (_PAGE_SPLITTING_SHIFT + 1)
 #else
 #define _PAGE_GLOBAL_SHIFT     (_PAGE_SPLITTING_SHIFT + 1)
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 #endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 /* XI - page cannot be executed */
 #ifndef _PAGE_NO_EXEC_SHIFT
 #define _PAGE_NO_EXEC_SHIFT    (_PAGE_MODIFIED_SHIFT + 1)
 #define _PAGE_GLOBAL_SHIFT     (_PAGE_NO_READ_SHIFT + 1)
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
 
-#else  /* !CONFIG_CPU_MIPSR2 */
+#else  /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
 #define _PAGE_GLOBAL_SHIFT     (_PAGE_MODIFIED_SHIFT + 1)
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 #define _PAGE_VALID_SHIFT      (_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID            (1 << _PAGE_VALID_SHIFT)
  */
 static inline uint64_t pte_to_entrylo(unsigned long pte_val)
 {
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        if (cpu_has_rixi) {
                int sa;
 #ifdef CONFIG_32BIT
index e92d6c4b5ed192305b0b1f1605481f745cfadb10..7163cd7fdd69a622892e4be83acbe0450e8f2af0 100644 (file)
@@ -104,7 +104,6 @@ do {                                                                        \
        if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))          \
                __fpsave = FP_SAVE_VECTOR;                              \
        (last) = resume(prev, next, task_thread_info(next), __fpsave);  \
-       disable_msa();                                                  \
 } while (0)
 
 #define finish_arch_switch(prev)                                       \
@@ -122,6 +121,7 @@ do {                                                                        \
        if (cpu_has_userlocal)                                          \
                write_c0_userlocal(current_thread_info()->tp_value);    \
        __restore_watch();                                              \
+       disable_msa();                                                  \
 } while (0)
 
 #endif /* _ASM_SWITCH_TO_H */
index e36515dcd3b29efcdb0014c7dfd4541805eb4e4c..209e5b76c1bce56f02ceeb1fdeffeccc6fe46bd8 100644 (file)
@@ -74,13 +74,12 @@ static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c)
 {
        unsigned long sr, mask, fcsr, fcsr0, fcsr1;
 
+       fcsr = c->fpu_csr31;
        mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
 
        sr = read_c0_status();
        __enable_fpu(FPU_AS_IS);
 
-       fcsr = read_32bit_cp1_register(CP1_STATUS);
-
        fcsr0 = fcsr & mask;
        write_32bit_cp1_register(CP1_STATUS, fcsr0);
        fcsr0 = read_32bit_cp1_register(CP1_STATUS);
index d2bfbc2e8995fba3b6da1ad7a190f0d872ca6fbf..3c8a18a00a65fee62e7cc11068d3866b18b04fd1 100644 (file)
@@ -29,7 +29,7 @@
 int kgdb_early_setup;
 #endif
 
-static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
+static DECLARE_BITMAP(irq_map, NR_IRQS);
 
 int allocate_irqno(void)
 {
@@ -109,7 +109,7 @@ void __init init_IRQ(void)
 #endif
 }
 
-#ifdef DEBUG_STACKOVERFLOW
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
 static inline void check_stack_overflow(void)
 {
        unsigned long sp;
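
DECLARE_BITMAP rounds the word count up, whereas the open-coded NR_IRQS / BITS_PER_LONG silently truncates whenever NR_IRQS is not a multiple of the word size. A standalone sketch of the two sizings on an LP64 machine (the NR_IRQS value is arbitrary):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define NR_IRQS 100     /* arbitrary; not a multiple of 64 */

static unsigned long truncated_map[NR_IRQS / BITS_PER_LONG]; /* 1 word: only 64 bits */
static DECLARE_BITMAP(irq_map, NR_IRQS);                     /* 2 words: all 100 bits */

int main(void)
{
	printf("truncated: %zu words, correct: %zu words\n",
	       sizeof(truncated_map) / sizeof(long),
	       sizeof(irq_map) / sizeof(long));
	return 0;
}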
index fd528d7ea27867ffed69abf25d3c7b3f374b7f64..336708ae5c5b4c74b75416058feabb4bef5e30b1 100644 (file)
@@ -444,7 +444,7 @@ struct plat_smp_ops bmips5000_smp_ops = {
 static void bmips_wr_vec(unsigned long dst, char *start, char *end)
 {
        memcpy((void *)dst, start, end - start);
-       dma_cache_wback((unsigned long)start, end - start);
+       dma_cache_wback(dst, end - start);
        local_flush_icache_range(dst, dst + (end - start));
        instruction_hazard();
 }
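
The writeback must target the destination the memcpy just dirtied, not the source buffer; otherwise stale lines can sit in the dcache at dst when the icache refills from memory. A schematic of the corrected sequence, with the MIPS cache helpers reduced to stubs:

#include <string.h>

/* stubs standing in for the MIPS cache helpers used above */
static void dma_cache_wback(unsigned long addr, unsigned long len) { (void)addr; (void)len; }
static void local_flush_icache_range(unsigned long s, unsigned long e) { (void)s; (void)e; }

static void wr_vec(unsigned long dst, char *start, char *end)
{
	memcpy((void *)dst, start, end - start);
	/* write back the destination lines the CPU just dirtied ... */
	dma_cache_wback(dst, end - start);
	/* ... then invalidate the icache over the same range */
	local_flush_icache_range(dst, dst + (end - start));
}

int main(void)
{
	static char src[16], dstbuf[16];
	wr_vec((unsigned long)dstbuf, src, src + sizeof(src));
	return 0;
}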
index 4b50c5787e25bdb4bdb28eb7736296e1d5d3812c..d5fa3eaf39a106546f52d82ec3e5391302ef8dec 100644 (file)
@@ -2409,7 +2409,7 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
                if (vcpu->mmio_needed == 2)
                        *gpr = *(int16_t *) run->mmio.data;
                else
-                       *gpr = *(int16_t *) run->mmio.data;
+                       *gpr = *(uint16_t *)run->mmio.data;
 
                break;
        case 1:
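
The one-character fix above matters because casting through int16_t sign-extends: an unsigned 16-bit MMIO value such as 0x8000 would land in the guest register as 0xffff8000. A minimal demonstration of the difference:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned char mmio_data[2] = { 0x00, 0x80 };  /* little-endian 0x8000 */
	long gpr_signed   = *(int16_t *)mmio_data;    /* sign-extends */
	long gpr_unsigned = *(uint16_t *)mmio_data;   /* zero-extends */

	printf("int16_t:  %#lx\n", (unsigned long)gpr_signed);   /* 0xffff...8000 */
	printf("uint16_t: %#lx\n", (unsigned long)gpr_unsigned); /* 0x8000 */
	return 0;
}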
index 7d12c0dded3ded2009f85ffd7ed7d6e52f645c0c..77e64942f0048c5aac366c0c80a6cf63f0c656d5 100644 (file)
@@ -34,7 +34,12 @@ LEAF(__strnlen_\func\()_asm)
 FEXPORT(__strnlen_\func\()_nocheck_asm)
        move            v0, a0
        PTR_ADDU        a1, a0                  # stop pointer
-1:     beq             v0, a1, 1f              # limit reached?
+1:
+#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
+       .set            noat
+       li              AT, 1
+#endif
+       beq             v0, a1, 1f              # limit reached?
 .ifeqs "\func", "kernel"
        EX(lb, t0, (v0), .Lfault\@)
 .else
@@ -42,7 +47,13 @@ FEXPORT(__strnlen_\func\()_nocheck_asm)
 .endif
        .set            noreorder
        bnez            t0, 1b
-1:      PTR_ADDIU      v0, 1
+1:
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+        PTR_ADDIU      v0, 1
+#else
+        PTR_ADDU       v0, AT
+       .set            at
+#endif
        .set            reorder
        PTR_SUBU        v0, a0
        jr              ra
index e70c33fdb88153ac6bfdf12a4f632d3b3a26ccb9..f2e8153e44f536213e196002f005bb86da9ef72f 100644 (file)
@@ -3,15 +3,13 @@
 #
 
 obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
-    bonito-irq.o mem.o machtype.o platform.o
+    bonito-irq.o mem.o machtype.o platform.o serial.o
 obj-$(CONFIG_PCI) += pci.o
 
 #
 # Serial port support
 #
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-loongson-serial-$(CONFIG_SERIAL_8250) := serial.o
-obj-y += $(loongson-serial-m) $(loongson-serial-y)
 obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
 obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
 
index e3c68b5da18da4012de0aaed6363d5a5484d5e41..509877c6e9d908d7bac6110982c7208ab69204af 100644 (file)
@@ -272,7 +272,7 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
        if (action & SMP_ASK_C0COUNT) {
                BUG_ON(cpu != 0);
                c0count = read_c0_count();
-               for (i = 1; i < loongson_sysconf.nr_cpus; i++)
+               for (i = 1; i < num_possible_cpus(); i++)
                        per_cpu(core0_c0count, i) = c0count;
        }
 }
index 0dbb65a51ce5b1c2913cfec00571710e3a0ecb10..2e03ab1735911d202ce82c97b4911b5c1002ed70 100644 (file)
@@ -1372,7 +1372,7 @@ static int probe_scache(void)
        scache_size = addr;
        c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
        c->scache.ways = 1;
-       c->dcache.waybit = 0;           /* does not matter */
+       c->scache.waybit = 0;           /* does not matter */
 
        return 1;
 }
index 5d6139390bf830adf503d67d004a5322d8eb7ad4..e23fdf2a9c80d2f0dbbb498343efb859c08f3b4e 100644 (file)
@@ -681,11 +681,7 @@ static unsigned int get_stack_depth(struct jit_ctx *ctx)
                sp_off += config_enabled(CONFIG_64BIT) ?
                        (ARGS_USED_BY_JIT + 1) * RSIZE : RSIZE;
 
-       /*
-        * Subtract the bytes for the last registers since we only care about
-        * the location on the stack pointer.
-        */
-       return sp_off - RSIZE;
+       return sp_off;
 }
 
 static void build_prologue(struct jit_ctx *ctx)
index e20b02e3ae28be201789dd260ab79a382be944f8..e10d10b9e82a98bf5e53382d88cbc98b769ef53c 100644 (file)
@@ -41,7 +41,7 @@ static irqreturn_t ill_acc_irq_handler(int irq, void *_priv)
                addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M,
                type & ILL_ACC_LEN_M);
 
-       rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE);
+       rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
 
        return IRQ_HANDLED;
 }
index ba8593a515baaa274d968aa64e6f54125238c032..de156ba3bd71c0d4db274a619c7c9fd6038c119c 100644 (file)
@@ -48,7 +48,9 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  * We get 160 bytes stack space from calling function, but only use
  * 11 * 8 byte (old backchain + r15 - r6) for storing registers.
  */
-#define STK_OFF (MAX_BPF_STACK + 8 + 4 + 4 + (160 - 11 * 8))
+#define STK_SPACE      (MAX_BPF_STACK + 8 + 4 + 4 + 160)
+#define STK_160_UNUSED (160 - 11 * 8)
+#define STK_OFF                (STK_SPACE - STK_160_UNUSED)
 #define STK_OFF_TMP    160     /* Offset of tmp buffer on stack */
 #define STK_OFF_HLEN   168     /* Offset of SKB header length on stack */
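
Splitting the expression into STK_SPACE and STK_160_UNUSED also makes the arithmetic checkable: with MAX_BPF_STACK at 512 (the value in uapi at the time, assumed here), the unused tail of the 160-byte register save area is 160 - 88 = 72 bytes, so the frame displacement comes to 688 - 72 = 616. A compile-time restatement of the same sums:

#include <assert.h>

#define MAX_BPF_STACK	512	/* assumed; matches linux/bpf.h at the time */
#define STK_SPACE	(MAX_BPF_STACK + 8 + 4 + 4 + 160)
#define STK_160_UNUSED	(160 - 11 * 8)
#define STK_OFF		(STK_SPACE - STK_160_UNUSED)

static_assert(STK_SPACE == 688, "total frame incl. full save area");
static_assert(STK_160_UNUSED == 72, "save-area bytes not used for r6-r15");
static_assert(STK_OFF == 616, "actual stack displacement emitted");

int main(void) { return 0; }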
 
index 20c146d1251ae2cd6c07279bf371adae6b2e3a1e..55423d8be580113d045d30edbf86d26fb74340ff 100644 (file)
@@ -384,13 +384,16 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
        }
        /* Setup stack and backchain */
        if (jit->seen & SEEN_STACK) {
-               /* lgr %bfp,%r15 (BPF frame pointer) */
-               EMIT4(0xb9040000, BPF_REG_FP, REG_15);
+               if (jit->seen & SEEN_FUNC)
+                       /* lgr %w1,%r15 (backchain) */
+                       EMIT4(0xb9040000, REG_W1, REG_15);
+               /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
+               EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
                /* aghi %r15,-STK_OFF */
                EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
                if (jit->seen & SEEN_FUNC)
-                       /* stg %bfp,152(%r15) (backchain) */
-                       EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_FP, REG_0,
+                       /* stg %w1,152(%r15) (backchain) */
+                       EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
                                      REG_15, 152);
        }
        /*
index 00b7d3a2fc60681253eb2e1c1b874e48bbd02a4a..16efa3ad037f7cffbdbb4a5ffcf57a5d25325648 100644 (file)
@@ -175,10 +175,10 @@ ENTRY(__clear_user)
        br      r3
 
        .section .fixup, "ax"
+99:
        br      r3
        .previous
        .section __ex_table, "a"
        .align  2
-99:
        .word   0b, 99b
        .previous
index a6e424d185d063bdddd43719e6a994fad6902c17..a6cfdabb6054aef28846342f49fdb2718e1263a5 100644 (file)
@@ -24,7 +24,8 @@ typedef struct {
        unsigned int    icache_line_size;
        unsigned int    ecache_size;
        unsigned int    ecache_line_size;
-       int             core_id;
+       unsigned short  sock_id;
+       unsigned short  core_id;
        int             proc_id;
 } cpuinfo_sparc;
 
index dc165ebdf05aef6086bf5d5c5b1dd3a85f686648..2a52c91d2c8acbf5f904e082400ba782d7279947 100644 (file)
@@ -308,12 +308,26 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
        "       sllx            %1, 32, %1\n"
        "       or              %0, %1, %0\n"
        "       .previous\n"
+       "       .section        .sun_m7_2insn_patch, \"ax\"\n"
+       "       .word           661b\n"
+       "       sethi           %%uhi(%4), %1\n"
+       "       sethi           %%hi(%4), %0\n"
+       "       .word           662b\n"
+       "       or              %1, %%ulo(%4), %1\n"
+       "       or              %0, %%lo(%4), %0\n"
+       "       .word           663b\n"
+       "       sllx            %1, 32, %1\n"
+       "       or              %0, %1, %0\n"
+       "       .previous\n"
        : "=r" (mask), "=r" (tmp)
        : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
               _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
               _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
          "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
               _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
+              _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
+         "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
+              _PAGE_CP_4V | _PAGE_E_4V |
               _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
 
        return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
@@ -342,9 +356,15 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
        "       andn            %0, %4, %0\n"
        "       or              %0, %5, %0\n"
        "       .previous\n"
+       "       .section        .sun_m7_2insn_patch, \"ax\"\n"
+       "       .word           661b\n"
+       "       andn            %0, %6, %0\n"
+       "       or              %0, %5, %0\n"
+       "       .previous\n"
        : "=r" (val)
        : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
-                    "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));
+                    "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
+                    "i" (_PAGE_CP_4V));
 
        return __pgprot(val);
 }
index ed8f071132e4d0e045bd9ffe22ce90f6604e0e3a..d1761df5cca6fe2814c19a343274d2884a3cf0c4 100644 (file)
@@ -40,11 +40,12 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu)      (cpu_data(cpu).proc_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).core_id)
-#define topology_core_cpumask(cpu)             (&cpu_core_map[cpu])
+#define topology_core_cpumask(cpu)             (&cpu_core_sib_map[cpu])
 #define topology_thread_cpumask(cpu)           (&per_cpu(cpu_sibling_map, cpu))
 #endif /* CONFIG_SMP */
 
 extern cpumask_t cpu_core_map[NR_CPUS];
+extern cpumask_t cpu_core_sib_map[NR_CPUS];
 static inline const struct cpumask *cpu_coregroup_mask(int cpu)
 {
         return &cpu_core_map[cpu];
index 6fd4436d32f06a59ed3113db3e6e52ddf3d3fa93..ec9c04de3664910d81b7a55bbb09084d5e235d39 100644 (file)
@@ -79,6 +79,8 @@ struct sun4v_2insn_patch_entry {
 };
 extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
        __sun4v_2insn_patch_end;
+extern struct sun4v_2insn_patch_entry __sun_m7_2insn_patch,
+       __sun_m7_2insn_patch_end;
 
 
 #endif /* !(__ASSEMBLY__) */
index 07cc49e541f40ea2cacc1f952aa7e07dd4a4e69b..0f679421b468343c747ac48abd2046b7e6ce051e 100644 (file)
@@ -69,6 +69,8 @@ void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
                             struct sun4v_1insn_patch_entry *);
 void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
                             struct sun4v_2insn_patch_entry *);
+void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+                            struct sun4v_2insn_patch_entry *);
 extern unsigned int dcache_parity_tl1_occurred;
 extern unsigned int icache_parity_tl1_occurred;
 
index 94e392bdee7dce5c984cc9f6f70307313a8f5a01..814fb1729b120bdeccbe2aacea958e7ae8add28d 100644 (file)
@@ -723,7 +723,6 @@ static int grpci2_of_probe(struct platform_device *ofdev)
                err = -ENOMEM;
                goto err1;
        }
-       memset(grpci2priv, 0, sizeof(*grpci2priv));
        priv->regs = regs;
        priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */
        priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
index 26c80e18d7b1b47bd74e9fce01bdb48eeb88fcbd..6f80936e0eea4d0dab82966b8f69cd7e6127b1dd 100644 (file)
@@ -614,45 +614,68 @@ static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
        }
 }
 
-static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
+static void find_back_node_value(struct mdesc_handle *hp, u64 node,
+                                char *srch_val,
+                                void (*func)(struct mdesc_handle *, u64, int),
+                                u64 val, int depth)
 {
-       u64 a;
-
-       mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
-               u64 t = mdesc_arc_target(hp, a);
-               const char *name;
-               const u64 *id;
+       u64 arc;
 
-               name = mdesc_node_name(hp, t);
-               if (!strcmp(name, "cpu")) {
-                       id = mdesc_get_property(hp, t, "id", NULL);
-                       if (*id < NR_CPUS)
-                               cpu_data(*id).core_id = core_id;
-               } else {
-                       u64 j;
+       /* Since we have an estimate of recursion depth, do a sanity check. */
+       if (depth == 0)
+               return;
 
-                       mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
-                               u64 n = mdesc_arc_target(hp, j);
-                               const char *n_name;
+       mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
+               u64 n = mdesc_arc_target(hp, arc);
+               const char *name = mdesc_node_name(hp, n);
 
-                               n_name = mdesc_node_name(hp, n);
-                               if (strcmp(n_name, "cpu"))
-                                       continue;
+               if (!strcmp(srch_val, name))
+                       (*func)(hp, n, val);
 
-                               id = mdesc_get_property(hp, n, "id", NULL);
-                               if (*id < NR_CPUS)
-                                       cpu_data(*id).core_id = core_id;
-                       }
-               }
+               find_back_node_value(hp, n, srch_val, func, val, depth-1);
        }
 }
 
+static void __mark_core_id(struct mdesc_handle *hp, u64 node,
+                          int core_id)
+{
+       const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+       if (*id < num_possible_cpus())
+               cpu_data(*id).core_id = core_id;
+}
+
+static void __mark_sock_id(struct mdesc_handle *hp, u64 node,
+                          int sock_id)
+{
+       const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+       if (*id < num_possible_cpus())
+               cpu_data(*id).sock_id = sock_id;
+}
+
+static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
+                         int core_id)
+{
+       find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
+}
+
+static void mark_sock_ids(struct mdesc_handle *hp, u64 mp,
+                         int sock_id)
+{
+       find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10);
+}
+
 static void set_core_ids(struct mdesc_handle *hp)
 {
        int idx;
        u64 mp;
 
        idx = 1;
+
+       /* Identify unique cores by looking for cpus backpointed to by
+        * level 1 instruction caches.
+        */
        mdesc_for_each_node_by_name(hp, mp, "cache") {
                const u64 *level;
                const char *type;
@@ -667,11 +690,72 @@ static void set_core_ids(struct mdesc_handle *hp)
                        continue;
 
                mark_core_ids(hp, mp, idx);
+               idx++;
+       }
+}
+
+static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
+{
+       u64 mp;
+       int idx = 1;
+       int fnd = 0;
+
+       /* Identify unique sockets by looking for cpus backpointed to by
+        * shared level n caches.
+        */
+       mdesc_for_each_node_by_name(hp, mp, "cache") {
+               const u64 *cur_lvl;
+
+               cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
+               if (*cur_lvl != level)
+                       continue;
+
+               mark_sock_ids(hp, mp, idx);
+               idx++;
+               fnd = 1;
+       }
+       return fnd;
+}
+
+static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
+{
+       int idx = 1;
 
+       mdesc_for_each_node_by_name(hp, mp, "socket") {
+               u64 a;
+
+               mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+                       u64 t = mdesc_arc_target(hp, a);
+                       const char *name;
+                       const u64 *id;
+
+                       name = mdesc_node_name(hp, t);
+                       if (strcmp(name, "cpu"))
+                               continue;
+
+                       id = mdesc_get_property(hp, t, "id", NULL);
+                       if (*id < num_possible_cpus())
+                               cpu_data(*id).sock_id = idx;
+               }
                idx++;
        }
 }
 
+static void set_sock_ids(struct mdesc_handle *hp)
+{
+       u64 mp;
+
+       /* If the machine description exposes socket data, use it.
+        * Otherwise fall back to shared L3 or L2 caches.

+        */
+       mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
+       if (mp != MDESC_NODE_NULL)
+               return set_sock_ids_by_socket(hp, mp);
+
+       if (!set_sock_ids_by_cache(hp, 3))
+               set_sock_ids_by_cache(hp, 2);
+}
+
 static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
 {
        u64 a;
@@ -707,7 +791,6 @@ static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
                        continue;
 
                mark_proc_ids(hp, mp, idx);
-
                idx++;
        }
 }
@@ -900,6 +983,7 @@ void mdesc_fill_in_cpu_data(cpumask_t *mask)
 
        set_core_ids(hp);
        set_proc_ids(hp);
+       set_sock_ids(hp);
 
        mdesc_release(hp);
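
find_back_node_value() above generalizes the old hand-unrolled two-level walk into a bounded recursion over MDESC_ARC_TYPE_BACK arcs, applying a callback to every node whose name matches. A standalone sketch of the same shape on a toy graph; the node and arc representation here is invented purely for illustration:

#include <stdio.h>
#include <string.h>

struct node {
	const char *name;
	int id;
	struct node *back[4];   /* back-arcs; NULL-terminated */
};

static void find_back_node_value(struct node *n, const char *srch,
				 void (*func)(struct node *, int),
				 int val, int depth)
{
	int i;

	if (depth == 0)         /* sanity cap on recursion depth */
		return;

	for (i = 0; n->back[i]; i++) {
		if (!strcmp(srch, n->back[i]->name))
			func(n->back[i], val);
		find_back_node_value(n->back[i], srch, func, val, depth - 1);
	}
}

static void mark_core(struct node *cpu, int core_id)
{
	printf("cpu %d -> core %d\n", cpu->id, core_id);
}

int main(void)
{
	struct node cpu0 = { "cpu", 0, { 0 } };
	struct node l1   = { "cache", 0, { &cpu0, 0 } };

	find_back_node_value(&l1, "cpu", mark_core, 1, 10);
	return 0;
}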
 
index 6f7251fd2eabc6b5b5790c2aa4486c84c042f814..c928bc64b4bac1b1c5eb88c71c348469796883c5 100644 (file)
@@ -1002,6 +1002,38 @@ static int __init pcibios_init(void)
 subsys_initcall(pcibios_init);
 
 #ifdef CONFIG_SYSFS
+
+#define SLOT_NAME_SIZE  11  /* Max decimal digits + null in u32 */
+
+static void pcie_bus_slot_names(struct pci_bus *pbus)
+{
+       struct pci_dev *pdev;
+       struct pci_bus *bus;
+
+       list_for_each_entry(pdev, &pbus->devices, bus_list) {
+               char name[SLOT_NAME_SIZE];
+               struct pci_slot *pci_slot;
+               const u32 *slot_num;
+               int len;
+
+               slot_num = of_get_property(pdev->dev.of_node,
+                                          "physical-slot#", &len);
+
+               if (slot_num == NULL || len != 4)
+                       continue;
+
+               snprintf(name, sizeof(name), "%u", slot_num[0]);
+               pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL);
+
+               if (IS_ERR(pci_slot))
+                       pr_err("PCI: pci_create_slot returned %ld.\n",
+                              PTR_ERR(pci_slot));
+       }
+
+       list_for_each_entry(bus, &pbus->children, node)
+               pcie_bus_slot_names(bus);
+}
+
 static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
 {
        const struct pci_slot_names {
@@ -1053,18 +1085,29 @@ static int __init of_pci_slot_init(void)
 
        while ((pbus = pci_find_next_bus(pbus)) != NULL) {
                struct device_node *node;
+               struct pci_dev *pdev;
+
+               pdev = list_first_entry(&pbus->devices, struct pci_dev,
+                                       bus_list);
 
-               if (pbus->self) {
-                       /* PCI->PCI bridge */
-                       node = pbus->self->dev.of_node;
+               if (pdev && pci_is_pcie(pdev)) {
+                       pcie_bus_slot_names(pbus);
                } else {
-                       struct pci_pbm_info *pbm = pbus->sysdata;
 
-                       /* Host PCI controller */
-                       node = pbm->op->dev.of_node;
-               }
+                       if (pbus->self) {
+
+                               /* PCI->PCI bridge */
+                               node = pbus->self->dev.of_node;
+
+                       } else {
+                               struct pci_pbm_info *pbm = pbus->sysdata;
 
-               pci_bus_slot_names(node, pbus);
+                               /* Host PCI controller */
+                               node = pbm->op->dev.of_node;
+                       }
+
+                       pci_bus_slot_names(node, pbus);
+               }
        }
 
        return 0;
index c38d19fc27baac8821acc57cf2e42120b7d66a3e..f7b261749383b4992300ba4418b1d16ef7251360 100644 (file)
@@ -255,6 +255,24 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
        }
 }
 
+void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+                            struct sun4v_2insn_patch_entry *end)
+{
+       while (start < end) {
+               unsigned long addr = start->addr;
+
+               *(unsigned int *) (addr +  0) = start->insns[0];
+               wmb();
+               __asm__ __volatile__("flush     %0" : : "r" (addr +  0));
+
+               *(unsigned int *) (addr +  4) = start->insns[1];
+               wmb();
+               __asm__ __volatile__("flush     %0" : : "r" (addr +  4));
+
+               start++;
+       }
+}
+
 static void __init sun4v_patch(void)
 {
        extern void sun4v_hvapi_init(void);
@@ -267,6 +285,9 @@ static void __init sun4v_patch(void)
 
        sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                                &__sun4v_2insn_patch_end);
+       if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7)
+               sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
+                                        &__sun_m7_2insn_patch_end);
 
        sun4v_hvapi_init();
 }
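
sun_m7_patch_2insn_range() follows the same recipe as the existing sun4v patcher: store each 32-bit instruction, order the store, then flush the stale I-cache line before moving on. A schematic with the SPARC-specific wmb/flush reduced to stubs; the entry layout mirrors sun4v_2insn_patch_entry, and the "kernel text" is a toy array:

#include <stdint.h>

struct insn_patch_entry {
	unsigned int addr;      /* in the kernel, the patch-site address */
	unsigned int insns[2];
};

static void wmb(void) { __asm__ __volatile__("" ::: "memory"); }  /* stub */
static void flush_insn(void *addr) { (void)addr; }  /* stands in for "flush %0" */

static void patch_2insn_range(struct insn_patch_entry *start,
			      struct insn_patch_entry *end,
			      unsigned int *text)
{
	while (start < end) {
		unsigned int *site = &text[start->addr];

		site[0] = start->insns[0];
		wmb();                  /* make the store visible ... */
		flush_insn(&site[0]);   /* ... then flush the stale I-cache line */

		site[1] = start->insns[1];
		wmb();
		flush_insn(&site[1]);
		start++;
	}
}

int main(void)
{
	unsigned int text[4] = { 0 };
	struct insn_patch_entry e = { 0, { 0x1, 0x2 } };

	patch_2insn_range(&e, &e + 1, text);
	return (int)text[1] - 2;        /* 0 on success */
}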
index 61139d9924cae4a8fdf5d4d5366a31052ea29616..19cd08d1867285f059f768402e4df14c64d7871d 100644 (file)
@@ -60,8 +60,12 @@ DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
+cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
+       [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
+EXPORT_SYMBOL(cpu_core_sib_map);
 
 static cpumask_t smp_commenced_mask;
 
@@ -1243,6 +1247,15 @@ void smp_fill_in_sib_core_maps(void)
                }
        }
 
+       for_each_present_cpu(i)  {
+               unsigned int j;
+
+               for_each_present_cpu(j)  {
+                       if (cpu_data(i).sock_id == cpu_data(j).sock_id)
+                               cpumask_set_cpu(j, &cpu_core_sib_map[i]);
+               }
+       }
+
        for_each_present_cpu(i) {
                unsigned int j;
 
index 09243057cb0b48f7fd1679129db63eb4094a8be6..f1a2f688b28a31fc47d2232f3ed10e9d95930223 100644 (file)
@@ -138,6 +138,11 @@ SECTIONS
                *(.pause_3insn_patch)
                __pause_3insn_patch_end = .;
        }
+       .sun_m7_2insn_patch : {
+               __sun_m7_2insn_patch = .;
+               *(.sun_m7_2insn_patch)
+               __sun_m7_2insn_patch_end = .;
+       }
        PERCPU_SECTION(SMP_CACHE_BYTES)
 
        . = ALIGN(PAGE_SIZE);
index 4ca0d6ba5ec8331c67f43f8515eb3737526208bb..559cb744112ccd608bf4288470398fb21350b0ce 100644 (file)
@@ -54,6 +54,7 @@
 #include "init_64.h"
 
 unsigned long kern_linear_pte_xor[4] __read_mostly;
+static unsigned long page_cache4v_flag;
 
 /* A bitmap, two bits for every 256MB of physical memory.  These two
  * bits determine what page size we use for kernel linear
@@ -1909,11 +1910,24 @@ static void __init sun4u_linear_pte_xor_finalize(void)
 
 static void __init sun4v_linear_pte_xor_finalize(void)
 {
+       unsigned long pagecv_flag;
+
+       /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead
+        * enables MCD error. Do not set bit 9 on M7 processor.
+        */
+       switch (sun4v_chip_type) {
+       case SUN4V_CHIP_SPARC_M7:
+               pagecv_flag = 0x00;
+               break;
+       default:
+               pagecv_flag = _PAGE_CV_4V;
+               break;
+       }
 #ifndef CONFIG_DEBUG_PAGEALLOC
        if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
                kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
                        PAGE_OFFSET;
-               kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+               kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
                                           _PAGE_P_4V | _PAGE_W_4V);
        } else {
                kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
@@ -1922,7 +1936,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
        if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
                kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
                        PAGE_OFFSET;
-               kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+               kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
                                           _PAGE_P_4V | _PAGE_W_4V);
        } else {
                kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
@@ -1931,7 +1945,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
        if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
                kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
                        PAGE_OFFSET;
-               kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+               kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
                                           _PAGE_P_4V | _PAGE_W_4V);
        } else {
                kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
@@ -1958,6 +1972,13 @@ static phys_addr_t __init available_memory(void)
        return available;
 }
 
+#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
+#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
+#define __DIRTY_BITS_4U         (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
+#define __DIRTY_BITS_4V         (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
+#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
+#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
+
 /* We need to exclude reserved regions. This exclusion will include
  * vmlinux and initrd. To be more precise the initrd size could be used to
  * compute a new lower limit because it is freed later during initialization.
@@ -2034,6 +2055,25 @@ void __init paging_init(void)
        memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
 #endif
 
+       /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
+        * bit on M7 processor. This is a conflicting usage of the same
+        * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
+        * Detection error on all pages and this will lead to problems
+        * later. The kernel does not run with MCD enabled, and hence the
+        * rest of the required steps to fully configure memory corruption
+        * detection are not taken. We need to ensure TTE.mcde is not
+        * set on M7 processor. Compute the value of cacheability
+        * flag for use later taking this into consideration.
+        */
+       switch (sun4v_chip_type) {
+       case SUN4V_CHIP_SPARC_M7:
+               page_cache4v_flag = _PAGE_CP_4V;
+               break;
+       default:
+               page_cache4v_flag = _PAGE_CACHE_4V;
+               break;
+       }
+
        if (tlb_type == hypervisor)
                sun4v_pgprot_init();
        else
@@ -2274,13 +2314,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
-#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
-#define __DIRTY_BITS_4U         (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
-#define __DIRTY_BITS_4V         (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
-#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
-#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
-
 pgprot_t PAGE_KERNEL __read_mostly;
 EXPORT_SYMBOL(PAGE_KERNEL);
 
@@ -2312,8 +2345,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
                    _PAGE_P_4U | _PAGE_W_4U);
        if (tlb_type == hypervisor)
                pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
-                           _PAGE_CP_4V | _PAGE_CV_4V |
-                           _PAGE_P_4V | _PAGE_W_4V);
+                           page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
 
        pte_base |= _PAGE_PMD_HUGE;
 
@@ -2450,14 +2482,14 @@ static void __init sun4v_pgprot_init(void)
        int i;
 
        PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
-                               _PAGE_CACHE_4V | _PAGE_P_4V |
+                               page_cache4v_flag | _PAGE_P_4V |
                                __ACCESS_BITS_4V | __DIRTY_BITS_4V |
                                _PAGE_EXEC_4V);
        PAGE_KERNEL_LOCKED = PAGE_KERNEL;
 
        _PAGE_IE = _PAGE_IE_4V;
        _PAGE_E = _PAGE_E_4V;
-       _PAGE_CACHE = _PAGE_CACHE_4V;
+       _PAGE_CACHE = page_cache4v_flag;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
        kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
@@ -2465,8 +2497,8 @@ static void __init sun4v_pgprot_init(void)
        kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
                PAGE_OFFSET;
 #endif
-       kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
-                                  _PAGE_P_4V | _PAGE_W_4V);
+       kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
+                                  _PAGE_W_4V);
 
        for (i = 1; i < 4; i++)
                kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
@@ -2479,12 +2511,12 @@ static void __init sun4v_pgprot_init(void)
                             _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
                             _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
 
-       page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
-       page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+       page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
+       page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
                       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
-       page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+       page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
                       __ACCESS_BITS_4V | _PAGE_EXEC_4V);
-       page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+       page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
                         __ACCESS_BITS_4V | _PAGE_EXEC_4V);
 
        page_exec_bit = _PAGE_EXEC_4V;
@@ -2542,7 +2574,7 @@ static unsigned long kern_large_tte(unsigned long paddr)
               _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
        if (tlb_type == hypervisor)
                val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
-                      _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
+                      page_cache4v_flag | _PAGE_P_4V |
                       _PAGE_EXEC_4V | _PAGE_W_4V);
 
        return val | paddr;
index 89dd0d78013aaff6c889340e0e3caceb4c8f8c88..805d25ca5f1db1602498c7047025b973ac788b3c 100644 (file)
@@ -2,15 +2,14 @@
 #define BOOT_COMPRESSED_MISC_H
 
 /*
- * we have to be careful, because no indirections are allowed here, and
- * paravirt_ops is a kind of one. As it will only run in baremetal anyway,
- * we just keep it from happening
+ * Special hack: we have to be careful, because no indirections are allowed here,
+ * and paravirt_ops is a kind of one. As it will only run in baremetal anyway,
+ * we just keep it from happening. (This list needs to be extended when new
+ * paravirt and debugging variants are added.)
  */
 #undef CONFIG_PARAVIRT
+#undef CONFIG_PARAVIRT_SPINLOCKS
 #undef CONFIG_KASAN
-#ifdef CONFIG_X86_32
-#define _ASM_X86_DESC_H 1
-#endif
 
 #include <linux/linkage.h>
 #include <linux/screen_info.h>
index 19507ffa5d28e9ce3ddece3856dd9cde4446f7f8..5fabf1362942c65e5fc4327511e51487a14bd5d7 100644 (file)
@@ -107,7 +107,7 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
 static inline int user_mode(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
-       return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
+       return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
 #else
        return !!(regs->cs & 3);
 #endif
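
On 32-bit, CS alone cannot identify vm86 mode: a vm86 task runs with an arbitrary RPL but has X86_VM_MASK set in EFLAGS, so OR-ing that flag into the comparison pushes any vm86 frame to >= USER_RPL. A standalone check using the architectural constants (RPL mask 3, EFLAGS.VM at bit 17):

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL
#define USER_RPL         0x3UL
#define X86_VM_MASK      (1UL << 17)    /* EFLAGS.VM */

static int user_mode(unsigned long cs, unsigned long flags)
{
	return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
	printf("kernel:     %d\n", user_mode(0x10, 0));           /* RPL 0 -> 0 */
	printf("user:       %d\n", user_mode(0x23, 0));           /* RPL 3 -> 1 */
	printf("vm86 RPL 0: %d\n", user_mode(0x00, X86_VM_MASK)); /* VM set -> 1 */
	return 0;
}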
index 5a9856eb12bad7edb0f9a333870e331f5677d588..7d5a1929d76b31bba69295e533e460ed50904cfd 100644 (file)
 #define TLS_SIZE                       (GDT_ENTRY_TLS_ENTRIES* 8)
 
 #ifdef __KERNEL__
+
+/*
+ * early_idt_handler_array is an array of entry points referenced in the
+ * early IDT.  For simplicity, it's a real array with one entry point
+ * every nine bytes.  That leaves room for an optional 'push $0' if the
+ * vector has no error code (two bytes), a 'push $vector_number' (two
+ * bytes), and a jump to the common entry code (up to five bytes).
+ */
+#define EARLY_IDT_HANDLER_SIZE 9
+
 #ifndef __ASSEMBLY__
 
-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
+extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 #ifdef CONFIG_TRACING
-# define trace_early_idt_handlers early_idt_handlers
+# define trace_early_idt_handler_array early_idt_handler_array
 #endif
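
The nine bytes come straight from the worst-case encoding spelled out in the comment above, which is exactly what the old hard-coded 2+2+5 array bound expressed. A compile-time restatement of that arithmetic:

#include <assert.h>

#define PUSH_ZERO_BYTES   2  /* optional 'push $0' when there is no error code */
#define PUSH_VECTOR_BYTES 2  /* 'push $vector_number' */
#define JMP_BYTES         5  /* worst-case jump to the common entry code */

#define EARLY_IDT_HANDLER_SIZE \
	(PUSH_ZERO_BYTES + PUSH_VECTOR_BYTES + JMP_BYTES)

static_assert(EARLY_IDT_HANDLER_SIZE == 9,
	      "each early IDT stub occupies one 9-byte array slot");

int main(void) { return 0; }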
 
 /*
index c469490db4a8d4a0c7ac7e71b17053fbd62b12e4..3c6bb342a48f1ad123ba82261c517c18266227b6 100644 (file)
 #define MSR_CORE_C3_RESIDENCY          0x000003fc
 #define MSR_CORE_C6_RESIDENCY          0x000003fd
 #define MSR_CORE_C7_RESIDENCY          0x000003fe
+#define MSR_KNL_CORE_C6_RESIDENCY      0x000003ff
 #define MSR_PKG_C2_RESIDENCY           0x0000060d
 #define MSR_PKG_C8_RESIDENCY           0x00000630
 #define MSR_PKG_C9_RESIDENCY           0x00000631
index e535533d5ab89313ba51937ad8dd5740413f119e..20190bdac9d58ecabddd3cbe6692d0f52bb687ad 100644 (file)
@@ -708,6 +708,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                          struct pt_regs *regs)
 {
        int i, ret = 0;
+       char *tmp;
 
        for (i = 0; i < mca_cfg.banks; i++) {
                m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
@@ -716,9 +717,11 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                        if (quirk_no_way_out)
                                quirk_no_way_out(i, m, regs);
                }
-               if (mce_severity(m, mca_cfg.tolerant, msg, true) >=
-                   MCE_PANIC_SEVERITY)
+
+               if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+                       *msg = tmp;
                        ret = 1;
+               }
        }
        return ret;
 }
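
Routing mce_severity() through a scratch pointer means *msg is only overwritten when a panic-grade bank is found, instead of being clobbered by whichever bank happened to be graded last. The out-parameter pattern in isolation, with the severity grading faked for illustration:

#include <stdio.h>

static int severity(int bank, char **msg)
{
	*msg = bank ? "fatal bank" : "benign bank";  /* always writes */
	return bank ? 2 : 0;                         /* 2 ~ MCE_PANIC_SEVERITY */
}

static int no_way_out(int nbanks, char **msg)
{
	int i, ret = 0;
	char *tmp;

	for (i = 0; i < nbanks; i++) {
		if (severity(i, &tmp) >= 2) {
			*msg = tmp;     /* publish only the decisive message */
			ret = 1;
		}
	}
	return ret;
}

int main(void)
{
	char *msg = "none";

	printf("%d %s\n", no_way_out(2, &msg), msg);
	return 0;
}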
index 87848ebe2bb79a56625908c5a6af1b78055d70c9..4f7001f28936f74f9cc75ba6124ee54f76e0ebe4 100644 (file)
@@ -190,6 +190,7 @@ static bool check_hw_exists(void)
        u64 val, val_fail, val_new= ~0;
        int i, reg, reg_fail, ret = 0;
        int bios_fail = 0;
+       int reg_safe = -1;
 
        /*
         * Check to see if the BIOS enabled any of the counters, if so
@@ -204,6 +205,8 @@ static bool check_hw_exists(void)
                        bios_fail = 1;
                        val_fail = val;
                        reg_fail = reg;
+               } else {
+                       reg_safe = i;
                }
        }
 
@@ -221,12 +224,23 @@ static bool check_hw_exists(void)
                }
        }
 
+       /*
+        * If all the counters are enabled, the below test will always
+        * fail.  The tools will also become useless in this scenario.
+        * Just fail and disable the hardware counters.
+        */
+
+       if (reg_safe == -1) {
+               reg = reg_safe;
+               goto msr_fail;
+       }
+
        /*
         * Read the current value, change it and read it back to see if it
         * matches, this is needed to detect certain hardware emulators
         * (qemu/kvm) that don't trap on the MSR access and always return 0s.
         */
-       reg = x86_pmu_event_addr(0);
+       reg = x86_pmu_event_addr(reg_safe);
        if (rdmsrl_safe(reg, &val))
                goto msr_fail;
        val ^= 0xffffUL;
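
The scan above remembers any counter the BIOS left free and runs the read-modify-read probe on that one; if every counter is BIOS-claimed there is nothing safe to scribble on, so the PMU is declared unusable. The shape of the fallback in miniature, with the per-counter state simulated:

#include <stdio.h>

int main(void)
{
	int bios_owned[4] = { 1, 1, 0, 1 };  /* simulated per-counter state */
	int i, reg_safe = -1;

	for (i = 0; i < 4; i++)
		if (!bios_owned[i])
			reg_safe = i;        /* a counter free for probing */

	if (reg_safe == -1) {
		puts("msr_fail: no free counter to probe");
		return 1;
	}
	printf("probe counter %d\n", reg_safe);
	return 0;
}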
@@ -611,6 +625,7 @@ struct sched_state {
        int     event;          /* event index */
        int     counter;        /* counter index */
        int     unassigned;     /* number of events to be assigned left */
+       int     nr_gp;          /* number of GP counters used */
        unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 };
 
@@ -620,27 +635,29 @@ struct sched_state {
 struct perf_sched {
        int                     max_weight;
        int                     max_events;
-       struct perf_event       **events;
-       struct sched_state      state;
+       int                     max_gp;
        int                     saved_states;
+       struct event_constraint **constraints;
+       struct sched_state      state;
        struct sched_state      saved[SCHED_STATES_MAX];
 };
 
 /*
  * Initialize iterator that runs through all events and counters.
  */
-static void perf_sched_init(struct perf_sched *sched, struct perf_event **events,
-                           int num, int wmin, int wmax)
+static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
+                           int num, int wmin, int wmax, int gpmax)
 {
        int idx;
 
        memset(sched, 0, sizeof(*sched));
        sched->max_events       = num;
        sched->max_weight       = wmax;
-       sched->events           = events;
+       sched->max_gp           = gpmax;
+       sched->constraints      = constraints;
 
        for (idx = 0; idx < num; idx++) {
-               if (events[idx]->hw.constraint->weight == wmin)
+               if (constraints[idx]->weight == wmin)
                        break;
        }
 
@@ -687,7 +704,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
        if (sched->state.event >= sched->max_events)
                return false;
 
-       c = sched->events[sched->state.event]->hw.constraint;
+       c = sched->constraints[sched->state.event];
        /* Prefer fixed purpose counters */
        if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
                idx = INTEL_PMC_IDX_FIXED;
@@ -696,11 +713,16 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
                                goto done;
                }
        }
+
        /* Grab the first unused counter starting with idx */
        idx = sched->state.counter;
        for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
-               if (!__test_and_set_bit(idx, sched->state.used))
+               if (!__test_and_set_bit(idx, sched->state.used)) {
+                       if (sched->state.nr_gp++ >= sched->max_gp)
+                               return false;
+
                        goto done;
+               }
        }
 
        return false;
@@ -745,7 +767,7 @@ static bool perf_sched_next_event(struct perf_sched *sched)
                        if (sched->state.weight > sched->max_weight)
                                return false;
                }
-               c = sched->events[sched->state.event]->hw.constraint;
+               c = sched->constraints[sched->state.event];
        } while (c->weight != sched->state.weight);
 
        sched->state.counter = 0;       /* start with first counter */
@@ -756,12 +778,12 @@ static bool perf_sched_next_event(struct perf_sched *sched)
 /*
  * Assign a counter for each event.
  */
-int perf_assign_events(struct perf_event **events, int n,
-                       int wmin, int wmax, int *assign)
+int perf_assign_events(struct event_constraint **constraints, int n,
+                       int wmin, int wmax, int gpmax, int *assign)
 {
        struct perf_sched sched;
 
-       perf_sched_init(&sched, events, n, wmin, wmax);
+       perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);
 
        do {
                if (!perf_sched_find_counter(&sched))
@@ -788,9 +810,9 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
                x86_pmu.start_scheduling(cpuc);
 
        for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
-               hwc = &cpuc->event_list[i]->hw;
+               cpuc->event_constraint[i] = NULL;
                c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
-               hwc->constraint = c;
+               cpuc->event_constraint[i] = c;
 
                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
@@ -801,7 +823,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
         */
        for (i = 0; i < n; i++) {
                hwc = &cpuc->event_list[i]->hw;
-               c = hwc->constraint;
+               c = cpuc->event_constraint[i];
 
                /* never assigned */
                if (hwc->idx == -1)
@@ -821,9 +843,26 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
        }
 
        /* slow path */
-       if (i != n)
-               unsched = perf_assign_events(cpuc->event_list, n, wmin,
-                                            wmax, assign);
+       if (i != n) {
+               int gpmax = x86_pmu.num_counters;
+
+               /*
+                * Do not allow scheduling of more than half the available
+                * generic counters.
+                *
+                * This helps avoid counter starvation of the sibling thread
+                * by ensuring at most half the counters cannot be in exclusive
+                * mode. There are no designated counters for the limits. Any
+                * N/2 counters can be used. This helps with events with
+                * specific counter constraints.
+                */
+               if (is_ht_workaround_enabled() && !cpuc->is_fake &&
+                   READ_ONCE(cpuc->excl_cntrs->exclusive_present))
+                       gpmax /= 2;
+
+               unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
+                                            wmax, gpmax, assign);
+       }
 
        /*
         * In case of success (unsched = 0), mark events as committed,
@@ -840,7 +879,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
                        e = cpuc->event_list[i];
                        e->hw.flags |= PERF_X86_EVENT_COMMITTED;
                        if (x86_pmu.commit_scheduling)
-                               x86_pmu.commit_scheduling(cpuc, e, assign[i]);
+                               x86_pmu.commit_scheduling(cpuc, i, assign[i]);
                }
        }
 
@@ -1292,8 +1331,10 @@ static void x86_pmu_del(struct perf_event *event, int flags)
                x86_pmu.put_event_constraints(cpuc, event);
 
        /* Delete the array entry. */
-       while (++i < cpuc->n_events)
+       while (++i < cpuc->n_events) {
                cpuc->event_list[i-1] = cpuc->event_list[i];
+               cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
+       }
        --cpuc->n_events;
 
        perf_event_update_userpage(event);
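
With constraints now carried in cpuc->event_constraint[], the slow path can cap generic-counter usage per sibling: when the other hyperthread has exclusive events pending, gpmax is halved so neither thread can starve the other. A toy greedy assignment under such a cap; the real scheduler additionally handles constraint weights and fixed counters:

#include <stdio.h>

/* assign up to gpmax of n requested events to counters 0..gpmax-1 */
static int assign_events(int n, int gpmax, int *assign)
{
	int i, used = 0;

	for (i = 0; i < n; i++) {
		if (used >= gpmax)
			return n - i;   /* unscheduled events remain */
		assign[i] = used++;
	}
	return 0;
}

int main(void)
{
	int assign[8], num_counters = 4, n = 3;
	int ht_exclusive_present = 1;   /* simulated sibling state */
	int gpmax = num_counters;

	if (ht_exclusive_present)
		gpmax /= 2;             /* leave half for the sibling thread */

	printf("unscheduled: %d\n", assign_events(n, gpmax, assign));
	return 0;
}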
index 6ac5cb7a9e14839dcd0b622a91f0f0939133c81e..ef78516850fb0e3ef653ff97bad5152b46389787 100644 (file)
@@ -74,6 +74,7 @@ struct event_constraint {
 #define PERF_X86_EVENT_EXCL            0x0040 /* HT exclusivity on counter */
 #define PERF_X86_EVENT_DYNAMIC         0x0080 /* dynamic alloc'd constraint */
 #define PERF_X86_EVENT_RDPMC_ALLOWED   0x0100 /* grant rdpmc permission */
+#define PERF_X86_EVENT_EXCL_ACCT       0x0200 /* accounted EXCL event */
 
 
 struct amd_nb {
@@ -134,8 +135,6 @@ enum intel_excl_state_type {
 struct intel_excl_states {
        enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];
        enum intel_excl_state_type state[X86_PMC_IDX_MAX];
-       int  num_alloc_cntrs;/* #counters allocated */
-       int  max_alloc_cntrs;/* max #counters allowed */
        bool sched_started; /* true if scheduling has started */
 };
 
@@ -144,6 +143,11 @@ struct intel_excl_cntrs {
 
        struct intel_excl_states states[2];
 
+       union {
+               u16     has_exclusive[2];
+               u32     exclusive_present;
+       };
+
        int             refcnt;         /* per-core: #HT threads */
        unsigned        core_id;        /* per-core: core id */
 };
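
The union lets each hyperthread flag its own u16 slot while the scheduler tests both with a single 32-bit load of exclusive_present, avoiding any locking just to ask whether either sibling holds exclusive events. A standalone demonstration of the aliasing, assuming the usual layout where the two u16s overlay the u32:

#include <stdio.h>
#include <stdint.h>

union excl_flags {
	uint16_t has_exclusive[2];   /* one slot per HT sibling */
	uint32_t exclusive_present;  /* both slots in one load */
};

int main(void)
{
	union excl_flags f = { .has_exclusive = { 0, 0 } };

	f.has_exclusive[1] = 1;      /* sibling thread 1 gains an EXCL event */

	/* nonzero iff either sibling has exclusive events pending */
	printf("exclusive_present = %u\n", f.exclusive_present);
	return 0;
}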
@@ -172,7 +176,11 @@ struct cpu_hw_events {
                                             added in the current transaction */
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];
+
        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
+       struct event_constraint *event_constraint[X86_PMC_IDX_MAX];
+
+       int                     n_excl; /* the number of exclusive events */
 
        unsigned int            group_flag;
        int                     is_fake;
@@ -519,9 +527,7 @@ struct x86_pmu {
        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);
 
-       void            (*commit_scheduling)(struct cpu_hw_events *cpuc,
-                                            struct perf_event *event,
-                                            int cntr);
+       void            (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
 
        void            (*start_scheduling)(struct cpu_hw_events *cpuc);
 
@@ -717,8 +723,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
 
 void x86_pmu_enable_all(int added);
 
-int perf_assign_events(struct perf_event **events, int n,
-                       int wmin, int wmax, int *assign);
+int perf_assign_events(struct event_constraint **constraints, int n,
+                       int wmin, int wmax, int gpmax, int *assign);
 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
 
 void x86_pmu_stop(struct perf_event *event, int flags);
@@ -929,4 +935,8 @@ static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
        return NULL;
 }
 
+static inline int is_ht_workaround_enabled(void)
+{
+       return 0;
+}
 #endif /* CONFIG_CPU_SUP_INTEL */
index 3998131d1a683058d6382b527c187028a7fede38..a1e35c9f06b9522af32b79cd4837f3a93a083f6b 100644 (file)
@@ -1923,7 +1923,6 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
        xl = &excl_cntrs->states[tid];
 
        xl->sched_started = true;
-       xl->num_alloc_cntrs = 0;
        /*
         * lock shared state until we are done scheduling
         * in stop_event_scheduling()
@@ -2000,6 +1999,11 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
         * across HT threads
         */
        is_excl = c->flags & PERF_X86_EVENT_EXCL;
+       if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
+               event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
+               if (!cpuc->n_excl++)
+                       WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
+       }
 
        /*
         * xl = state of current HT
@@ -2008,18 +2012,6 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
        xl = &excl_cntrs->states[tid];
        xlo = &excl_cntrs->states[o_tid];
 
-       /*
-        * do not allow scheduling of more than max_alloc_cntrs
-        * which is set to half the available generic counters.
-        * this helps avoid counter starvation of sibling thread
-        * by ensuring at most half the counters cannot be in
-        * exclusive mode. There is not designated counters for the
-        * limits. Any N/2 counters can be used. This helps with
-        * events with specifix counter constraints
-        */
-       if (xl->num_alloc_cntrs++ == xl->max_alloc_cntrs)
-               return &emptyconstraint;
-
        cx = c;
 
        /*
@@ -2106,7 +2098,7 @@ static struct event_constraint *
 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                            struct perf_event *event)
 {
-       struct event_constraint *c1 = event->hw.constraint;
+       struct event_constraint *c1 = cpuc->event_constraint[idx];
        struct event_constraint *c2;
 
        /*
@@ -2150,6 +2142,11 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
 
        xl = &excl_cntrs->states[tid];
        xlo = &excl_cntrs->states[o_tid];
+       if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
+               hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
+               if (!--cpuc->n_excl)
+                       WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
+       }
 
        /*
         * put_constraint may be called from x86_schedule_events()
@@ -2188,8 +2185,6 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
                                        struct perf_event *event)
 {
-       struct event_constraint *c = event->hw.constraint;
-
        intel_put_shared_regs_event_constraints(cpuc, event);
 
        /*
@@ -2197,19 +2192,14 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
         * all events are subject to and must call the
         * put_excl_constraints() routine
         */
-       if (c && cpuc->excl_cntrs)
+       if (cpuc->excl_cntrs)
                intel_put_excl_constraints(cpuc, event);
-
-       /* cleanup dynamic constraint */
-       if (c && (c->flags & PERF_X86_EVENT_DYNAMIC))
-               event->hw.constraint = NULL;
 }
 
-static void intel_commit_scheduling(struct cpu_hw_events *cpuc,
-                                   struct perf_event *event, int cntr)
+static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
 {
        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
-       struct event_constraint *c = event->hw.constraint;
+       struct event_constraint *c = cpuc->event_constraint[idx];
        struct intel_excl_states *xlo, *xl;
        int tid = cpuc->excl_thread_id;
        int o_tid = 1 - tid;
@@ -2639,8 +2629,6 @@ static void intel_pmu_cpu_starting(int cpu)
                cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
 
        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
-               int h = x86_pmu.num_counters >> 1;
-
                for_each_cpu(i, topology_thread_cpumask(cpu)) {
                        struct intel_excl_cntrs *c;
 
@@ -2654,11 +2642,6 @@ static void intel_pmu_cpu_starting(int cpu)
                }
                cpuc->excl_cntrs->core_id = core_id;
                cpuc->excl_cntrs->refcnt++;
-               /*
-                * set hard limit to half the number of generic counters
-                */
-               cpuc->excl_cntrs->states[0].max_alloc_cntrs = h;
-               cpuc->excl_cntrs->states[1].max_alloc_cntrs = h;
        }
 }
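A minimal sketch of the accounting pattern the hunks above introduce, using the names from the diff (n_excl, has_exclusive[], PERF_X86_EVENT_EXCL_ACCT) but simplified out of context: the first exclusive event on a hyper-thread publishes the flag, the last one clears it, and WRITE_ONCE() keeps the store whole for the sibling thread's lockless read.

	/* Sketch only: paired get/put accounting of exclusive events. */
	static void excl_acct_get(struct cpu_hw_events *cpuc,
				  struct hw_perf_event *hwc, int tid)
	{
		if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT)
			return;					/* already counted */
		hwc->flags |= PERF_X86_EVENT_EXCL_ACCT;
		if (!cpuc->n_excl++)				/* 0 -> 1 */
			WRITE_ONCE(cpuc->excl_cntrs->has_exclusive[tid], 1);
	}

	static void excl_acct_put(struct cpu_hw_events *cpuc,
				  struct hw_perf_event *hwc, int tid)
	{
		if (!(hwc->flags & PERF_X86_EVENT_EXCL_ACCT))
			return;
		hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
		if (!--cpuc->n_excl)				/* 1 -> 0 */
			WRITE_ONCE(cpuc->excl_cntrs->has_exclusive[tid], 0);
	}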
 
index 813f75d71175e3a117f13ec53efe6856a0508bec..7f73b3553e2ee03af8dd6283cf8b3182173d3a2d 100644 (file)
@@ -706,9 +706,9 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 
        cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
 
-       if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_LDLAT)
+       if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
                cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
-       else if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_ST)
+       else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
                cpuc->pebs_enabled &= ~(1ULL << 63);
 
        if (cpuc->enabled)
index ffe666c2c6b58657b5895948a2e7d69f95223521..123ff1bb2f60363c9dc267329fb199dedecfde63 100644 (file)
@@ -151,7 +151,7 @@ static int __init pt_pmu_hw_init(void)
 
                de_attr->attr.attr.name = pt_caps[i].name;
 
-               sysfs_attr_init(&de_attrs->attr.attr);
+               sysfs_attr_init(&de_attr->attr.attr);
 
                de_attr->attr.attr.mode         = S_IRUGO;
                de_attr->attr.show              = pt_cap_show;
@@ -615,7 +615,8 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
                                   struct perf_output_handle *handle)
 
 {
-       unsigned long idx, npages, end;
+       unsigned long head = local64_read(&buf->head);
+       unsigned long idx, npages, wakeup;
 
        if (buf->snapshot)
                return 0;
@@ -634,17 +635,26 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
        buf->topa_index[buf->stop_pos]->stop = 0;
        buf->topa_index[buf->intr_pos]->intr = 0;
 
-       if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
-               npages = (handle->size + 1) >> PAGE_SHIFT;
-               end = (local64_read(&buf->head) >> PAGE_SHIFT) + npages;
-               /*if (end > handle->wakeup >> PAGE_SHIFT)
-                 end = handle->wakeup >> PAGE_SHIFT;*/
-               idx = end & (buf->nr_pages - 1);
-               buf->stop_pos = idx;
-               idx = (local64_read(&buf->head) >> PAGE_SHIFT) + npages - 1;
-               idx &= buf->nr_pages - 1;
-               buf->intr_pos = idx;
-       }
+       /* how many pages till the STOP marker */
+       npages = handle->size >> PAGE_SHIFT;
+
+       /* if it's on a page boundary, fill up one more page */
+       if (!offset_in_page(head + handle->size + 1))
+               npages++;
+
+       idx = (head >> PAGE_SHIFT) + npages;
+       idx &= buf->nr_pages - 1;
+       buf->stop_pos = idx;
+
+       wakeup = handle->wakeup >> PAGE_SHIFT;
+
+       /* in the worst case, wake up the consumer one page before hard stop */
+       idx = (head >> PAGE_SHIFT) + npages - 1;
+       if (idx > wakeup)
+               idx = wakeup;
+
+       idx &= buf->nr_pages - 1;
+       buf->intr_pos = idx;
 
        buf->topa_index[buf->stop_pos]->stop = 1;
        buf->topa_index[buf->intr_pos]->intr = 1;
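A worked example of the marker arithmetic above, with assumed values (PAGE_SHIFT = 12, an 8-page buffer): head = 0x3000 and handle->size = 0x1fff give npages = 1; since head + size + 1 = 0x5000 is page-aligned, npages becomes 2, so the STOP marker lands on page (3 + 2) & 7 = 5 and the interrupt marker one page earlier at (3 + 2 - 1) & 7 = 4, clamped so it never passes the wakeup page.

	/* Assumed values; PAGE_SHIFT == 12, nr_pages == 8 (power of two). */
	unsigned long head = 0x3000, size = 0x1fff, nr_pages = 8;
	unsigned long npages, stop_pos, intr_pos;

	npages = size >> PAGE_SHIFT;				/* 1 */
	if (!offset_in_page(head + size + 1))			/* 0x5000: aligned */
		npages++;					/* 2 */

	stop_pos = ((head >> PAGE_SHIFT) + npages) & (nr_pages - 1);	 /* 5 */
	intr_pos = ((head >> PAGE_SHIFT) + npages - 1) & (nr_pages - 1); /* 4 */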
index c635b8b49e931e7926efc3dc96475a8c577958e0..90b7c501c95ba021a7017efa71463cef26111c66 100644 (file)
@@ -365,9 +365,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int
        bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
 
        for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
-               hwc = &box->event_list[i]->hw;
                c = uncore_get_event_constraint(box, box->event_list[i]);
-               hwc->constraint = c;
+               box->event_constraint[i] = c;
                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
        }
@@ -375,7 +374,7 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int
        /* fastpath, try to reuse previous register */
        for (i = 0; i < n; i++) {
                hwc = &box->event_list[i]->hw;
-               c = hwc->constraint;
+               c = box->event_constraint[i];
 
                /* never assigned */
                if (hwc->idx == -1)
@@ -395,8 +394,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int
        }
        /* slow path */
        if (i != n)
-               ret = perf_assign_events(box->event_list, n,
-                                        wmin, wmax, assign);
+               ret = perf_assign_events(box->event_constraint, n,
+                                        wmin, wmax, n, assign);
 
        if (!assign || ret) {
                for (i = 0; i < n; i++)
@@ -840,6 +839,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
        box->phys_id = phys_id;
        box->pci_dev = pdev;
        box->pmu = pmu;
+       uncore_box_init(box);
        pci_set_drvdata(pdev, box);
 
        raw_spin_lock(&uncore_box_lock);
@@ -1003,8 +1003,10 @@ static int uncore_cpu_starting(int cpu)
                        pmu = &type->pmus[j];
                        box = *per_cpu_ptr(pmu->box, cpu);
                        /* called by uncore_cpu_init? */
-                       if (box && box->phys_id >= 0)
+                       if (box && box->phys_id >= 0) {
+                               uncore_box_init(box);
                                continue;
+                       }
 
                        for_each_online_cpu(k) {
                                exist = *per_cpu_ptr(pmu->box, k);
@@ -1020,8 +1022,10 @@ static int uncore_cpu_starting(int cpu)
                                }
                        }
 
-                       if (box)
+                       if (box) {
                                box->phys_id = phys_id;
+                               uncore_box_init(box);
+                       }
                }
        }
        return 0;
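Box initialization moves out of the enable path (see the header diff below) and into PCI probe and the CPU-starting notifier, so every box is initialized exactly once and as early as possible. The pattern that keeps the call idempotent is atomic test_and_set_bit(); a generic sketch:

	/* test_and_set_bit() returns the old bit value atomically, so
	 * only the first caller observes 0 and runs the init hook. */
	static void box_init_once(struct intel_uncore_box *box)
	{
		if (test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags))
			return;			/* already initialized */
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}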
index 6c8c1e7e69d85d3ad217eada0f0e55573c3daaf0..ceac8f5dc0184b531548e302e11ae981e753a7e9 100644 (file)
@@ -97,6 +97,7 @@ struct intel_uncore_box {
        atomic_t refcnt;
        struct perf_event *events[UNCORE_PMC_IDX_MAX];
        struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
+       struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
        unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
        u64 tags[UNCORE_PMC_IDX_MAX];
        struct pci_dev *pci_dev;
@@ -257,14 +258,6 @@ static inline int uncore_num_counters(struct intel_uncore_box *box)
        return box->pmu->type->num_counters;
 }
 
-static inline void uncore_box_init(struct intel_uncore_box *box)
-{
-       if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
-               if (box->pmu->type->ops->init_box)
-                       box->pmu->type->ops->init_box(box);
-       }
-}
-
 static inline void uncore_disable_box(struct intel_uncore_box *box)
 {
        if (box->pmu->type->ops->disable_box)
@@ -273,8 +266,6 @@ static inline void uncore_disable_box(struct intel_uncore_box *box)
 
 static inline void uncore_enable_box(struct intel_uncore_box *box)
 {
-       uncore_box_init(box);
-
        if (box->pmu->type->ops->enable_box)
                box->pmu->type->ops->enable_box(box);
 }
@@ -297,6 +288,14 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box,
        return box->pmu->type->ops->read_counter(box, event);
 }
 
+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+       if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+               if (box->pmu->type->ops->init_box)
+                       box->pmu->type->ops->init_box(box);
+       }
+}
+
 static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
 {
        return (box->phys_id < 0);
index 12d9548457e7195a8a36b458e374cab9cabe5e07..6d6e85dd5849878e9caa379ef20eaab10b97559f 100644 (file)
                                ((1ULL << (n)) - 1)))
 
 /* Haswell-EP Ubox */
-#define HSWEP_U_MSR_PMON_CTR0                  0x705
-#define HSWEP_U_MSR_PMON_CTL0                  0x709
+#define HSWEP_U_MSR_PMON_CTR0                  0x709
+#define HSWEP_U_MSR_PMON_CTL0                  0x705
 #define HSWEP_U_MSR_PMON_FILTER                        0x707
 
 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL                0x703
@@ -1914,7 +1914,7 @@ static struct intel_uncore_type hswep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 18,
-       .perf_ctr_bits          = 44,
+       .perf_ctr_bits          = 48,
        .event_ctl              = HSWEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = HSWEP_C0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
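Two straight fixes above: the Ubox counter and control MSR addresses were transposed (the control register lives at 0x705, the counter at 0x709), and the cbox counters are 48 bits wide, not 44. The width matters when consuming raw reads; a sketch of the usual masking, with the helper name invented for illustration:

	/* Hypothetical helper: truncate a raw MSR read to the counter's
	 * implemented width (48 bits for the Haswell-EP cbox above). */
	static inline u64 mask_counter(u64 raw, int perf_ctr_bits)
	{
		return raw & ((1ULL << perf_ctr_bits) - 1);
	}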
index 2b55ee6db053c79fbe91a6119e613075be54111b..5a4668136e9892b6b8695d1d82edf86afbdea0a0 100644 (file)
@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
        clear_bss();
 
        for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
-               set_intr_gate(i, early_idt_handlers[i]);
+               set_intr_gate(i, early_idt_handler_array[i]);
        load_idt((const struct desc_ptr *)&idt_descr);
 
        copy_bootdata(__va(real_mode_data));
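The rename is not cosmetic: the early IDT stubs become a fixed-stride array, so C code can index entry i directly instead of relying on every stub being exactly 9 hand-counted bytes. A sketch of the declaration this loop relies on (added to asm/segment.h in this series):

	/* Each stub: optional 2-byte "push $0", 2-byte "push $i",
	 * 5-byte jmp; the .fill directives in the assembly hunks below
	 * pad every entry to EARLY_IDT_HANDLER_SIZE with 0xcc (int3). */
	#define EARLY_IDT_HANDLER_SIZE 9
	extern const char
	early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];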
index d031bad9e07eadf3a80bc69a449cd13a44ed8080..53eeb226657caa6868c826dd4b1fb83a1bf514a0 100644 (file)
@@ -478,21 +478,22 @@ is486:
 __INIT
 setup_once:
        /*
-        * Set up a idt with 256 entries pointing to ignore_int,
-        * interrupt gates. It doesn't actually load idt - that needs
-        * to be done on each CPU. Interrupts are enabled elsewhere,
-        * when we can be relatively sure everything is ok.
+        * Set up an idt with 256 interrupt gates that push zero if there
+        * is no error code and then jump to early_idt_handler_common.
+        * It doesn't actually load the idt - that needs to be done on
+        * each CPU. Interrupts are enabled elsewhere, when we can be
+        * relatively sure everything is ok.
         */
 
        movl $idt_table,%edi
-       movl $early_idt_handlers,%eax
+       movl $early_idt_handler_array,%eax
        movl $NUM_EXCEPTION_VECTORS,%ecx
 1:
        movl %eax,(%edi)
        movl %eax,4(%edi)
        /* interrupt gate, dpl=0, present */
        movl $(0x8E000000 + __KERNEL_CS),2(%edi)
-       addl $9,%eax
+       addl $EARLY_IDT_HANDLER_SIZE,%eax
        addl $8,%edi
        loop 1b
 
@@ -524,26 +525,28 @@ setup_once:
        andl $0,setup_once_ref  /* Once is enough, thanks */
        ret
 
-ENTRY(early_idt_handlers)
+ENTRY(early_idt_handler_array)
        # 36(%esp) %eflags
        # 32(%esp) %cs
        # 28(%esp) %eip
        # 24(%esp) error code
        i = 0
        .rept NUM_EXCEPTION_VECTORS
-       .if (EXCEPTION_ERRCODE_MASK >> i) & 1
-       ASM_NOP2
-       .else
+       .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
        pushl $0                # Dummy error code, to make stack frame uniform
        .endif
        pushl $i                # 20(%esp) Vector number
-       jmp early_idt_handler
+       jmp early_idt_handler_common
        i = i + 1
+       .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
        .endr
-ENDPROC(early_idt_handlers)
+ENDPROC(early_idt_handler_array)
        
-       /* This is global to keep gas from relaxing the jumps */
-ENTRY(early_idt_handler)
+early_idt_handler_common:
+       /*
+        * The stack is the hardware frame, an error code or zero, and the
+        * vector number.
+        */
        cld
 
        cmpl $2,(%esp)          # X86_TRAP_NMI
@@ -603,7 +606,7 @@ ex_entry:
 is_nmi:
        addl $8,%esp            /* drop vector number and error code */
        iret
-ENDPROC(early_idt_handler)
+ENDPROC(early_idt_handler_common)
 
 /* This is the default interrupt "handler" :-) */
        ALIGN
index ae6588b301c248b3c281a1e072802e6764e9ac44..df7e78057ae007dab28bf625f4cbbba25cf9bcd6 100644 (file)
@@ -321,26 +321,28 @@ bad_address:
        jmp bad_address
 
        __INIT
-       .globl early_idt_handlers
-early_idt_handlers:
+ENTRY(early_idt_handler_array)
        # 104(%rsp) %rflags
        #  96(%rsp) %cs
        #  88(%rsp) %rip
        #  80(%rsp) error code
        i = 0
        .rept NUM_EXCEPTION_VECTORS
-       .if (EXCEPTION_ERRCODE_MASK >> i) & 1
-       ASM_NOP2
-       .else
+       .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
        pushq $0                # Dummy error code, to make stack frame uniform
        .endif
        pushq $i                # 72(%rsp) Vector number
-       jmp early_idt_handler
+       jmp early_idt_handler_common
        i = i + 1
+       .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
        .endr
+ENDPROC(early_idt_handler_array)
 
-/* This is global to keep gas from relaxing the jumps */
-ENTRY(early_idt_handler)
+early_idt_handler_common:
+       /*
+        * The stack is the hardware frame, an error code or zero, and the
+        * vector number.
+        */
        cld
 
        cmpl $2,(%rsp)          # X86_TRAP_NMI
@@ -412,7 +414,7 @@ ENTRY(early_idt_handler)
 is_nmi:
        addq $16,%rsp           # drop vector number and error code
        INTERRUPT_RETURN
-ENDPROC(early_idt_handler)
+ENDPROC(early_idt_handler_common)
 
        __INITDATA
 
index 009183276bb738fbd28805256ccfaa04e063c364..6185d3141219019d82fa2ea56c4f2f5728b8d17a 100644 (file)
@@ -173,6 +173,21 @@ static void init_thread_xstate(void)
                xstate_size = sizeof(struct i387_fxsave_struct);
        else
                xstate_size = sizeof(struct i387_fsave_struct);
+
+       /*
+        * Quirk: we don't yet handle the XSAVES* instructions
+        * correctly, as we don't correctly convert between
+        * standard and compacted format when interfacing
+        * with user-space - so disable it for now.
+        *
+        * The difference is small: with recent CPUs the
+        * compacted format is only marginally smaller than
+        * the standard FPU state format.
+        *
+        * ( This is easy to backport while we are fixing
+        *   XSAVES* support. )
+        */
+       setup_clear_cpu_cap(X86_FEATURE_XSAVES);
 }
 
 /*
index 44a7d25154973437e0ce4233e142d01c43a948b9..b73337634214c209e250051cd21e00bd2436fcd6 100644 (file)
@@ -4215,13 +4215,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        u64 entry, gentry, *spte;
        int npte;
        bool remote_flush, local_flush, zap_page;
-       union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
-               .cr0_wp = 1,
-               .cr4_pae = 1,
-               .nxe = 1,
-               .smep_andnot_wp = 1,
-               .smap_andnot_wp = 1,
-       };
+       union kvm_mmu_page_role mask = { };
+
+       mask.cr0_wp = 1;
+       mask.cr4_pae = 1;
+       mask.nxe = 1;
+       mask.smep_andnot_wp = 1;
+       mask.smap_andnot_wp = 1;
 
        /*
         * If we don't have indirect shadow pages, it means no page is
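The rewrite above replaces a compound-literal designated initializer with an empty initializer plus member assignments. The two forms are semantically identical; the assignment form is presumably there because some older GCC versions reject designated initializers that name members of an anonymous struct inside a union. A minimal illustration of the pattern:

	/* Both forms zero the union, then set the named bit-fields. */
	union role { struct { unsigned cr0_wp:1, nxe:1; }; unsigned word; };

	void example(void)
	{
		union role a = { .cr0_wp = 1, .nxe = 1 }; /* trips some old GCCs */
		union role b = { };                       /* portable spelling */

		b.cr0_wp = 1;
		b.nxe = 1;
	}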
index 99f76103c6b733e3d587652e3ab4228d20d57ac9..ddeff4844a100de83b52ef9dae9f42974666dc23 100644 (file)
@@ -966,7 +966,12 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
        }
        ctx.cleanup_addr = proglen;
 
-       for (pass = 0; pass < 10; pass++) {
+       /* JITed image shrinks with every pass and the loop iterates
+        * until the image stops shrinking. Very large bpf programs
+        * may converge on the last pass. In such a case, do one more
+        * pass to emit the final image.
+        */
+       for (pass = 0; pass < 10 || image; pass++) {
                proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
                if (proglen <= 0) {
                        image = NULL;
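The new loop bound encodes a fixed-point iteration: keep re-JITing until the emitted length stops shrinking (branch offsets tighten as the image shrinks), and only then allocate the image and run one final pass to write it. The shape of the pattern, with emit() and alloc_image() as hypothetical stand-ins:

	/* Converge on a stable size, then emit for real.
	 * emit(NULL) only measures; emit(buf) writes the instructions. */
	int len = 0;
	char *buf = NULL;

	for (;;) {
		int n = emit(buf);	/* measure, or write on the last pass */
		if (buf)
			break;		/* final image written */
		if (n == len)		/* size converged */
			buf = alloc_image(n);
		len = n;
	}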
index d93963340c3c0d958385ce4ec8aadcdfe225a174..14a63ed6fe092cd3f512d155046942a28c3744e8 100644 (file)
@@ -482,9 +482,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 
 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
-       struct pci_sysdata *sd = bridge->bus->sysdata;
-
-       ACPI_COMPANION_SET(&bridge->dev, sd->companion);
+       /*
+        * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
+        * here, pci_create_root_bus() has been called by someone else and
+        * sysdata is likely to be different from what we expect.  Let it go in
+        * that case.
+        */
+       if (!bridge->dev.parent) {
+               struct pci_sysdata *sd = bridge->bus->sysdata;
+               ACPI_COMPANION_SET(&bridge->dev, sd->companion);
+       }
        return 0;
 }
 
index 172a02a6ad146fea24ab966cf46a3612434a03da..ba78ccf651e7764e9db92cfca37927a8d68e3892 100644 (file)
@@ -185,4 +185,17 @@ static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
        return -EINVAL;
 }
 
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+                                   dma_addr_t *dma_handle, gfp_t flag,
+                                   struct dma_attrs *attrs)
+{
+       return NULL;
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+                                 void *vaddr, dma_addr_t dma_handle,
+                                 struct dma_attrs *attrs)
+{
+}
+
 #endif /* _XTENSA_DMA_MAPPING_H */
index e68b71b85a7eaf0e3097debe8bf4dc4078e7a038..594eea04266e6d05f7256255552a1c4c72c664f3 100644 (file)
@@ -1600,6 +1600,7 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
        return NOTIFY_OK;
 }
 
+/* hctx->ctxs will be freed in queue's release handler */
 static void blk_mq_exit_hctx(struct request_queue *q,
                struct blk_mq_tag_set *set,
                struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
@@ -1618,7 +1619,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 
        blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
        blk_free_flush_queue(hctx->fq);
-       kfree(hctx->ctxs);
        blk_mq_free_bitmap(&hctx->ctx_map);
 }
 
@@ -1891,8 +1891,12 @@ void blk_mq_release(struct request_queue *q)
        unsigned int i;
 
        /* hctx kobj stays in hctx */
-       queue_for_each_hw_ctx(q, hctx, i)
+       queue_for_each_hw_ctx(q, hctx, i) {
+               if (!hctx)
+                       continue;
+               kfree(hctx->ctxs);
                kfree(hctx);
+       }
 
        kfree(q->queue_hw_ctx);
 
index 0a536dc05f3b559d6d04c1e819d65290f96f7c35..ea982eadaf6380b974d6b1d39a7197085217ac91 100644 (file)
@@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
        /* allocate ext devt */
        idr_preload(GFP_KERNEL);
 
-       spin_lock(&ext_devt_lock);
+       spin_lock_bh(&ext_devt_lock);
        idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
-       spin_unlock(&ext_devt_lock);
+       spin_unlock_bh(&ext_devt_lock);
 
        idr_preload_end();
        if (idx < 0)
@@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt)
                return;
 
        if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-               spin_lock(&ext_devt_lock);
+               spin_lock_bh(&ext_devt_lock);
                idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-               spin_unlock(&ext_devt_lock);
+               spin_unlock_bh(&ext_devt_lock);
        }
 }
 
@@ -653,7 +653,6 @@ void del_gendisk(struct gendisk *disk)
        disk->flags &= ~GENHD_FL_UP;
 
        sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
-       bdi_unregister(&disk->queue->backing_dev_info);
        blk_unregister_queue(disk);
        blk_unregister_region(disk_devt(disk), disk->minors);
 
@@ -691,13 +690,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
        } else {
                struct hd_struct *part;
 
-               spin_lock(&ext_devt_lock);
+               spin_lock_bh(&ext_devt_lock);
                part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
                if (part && get_disk(part_to_disk(part))) {
                        *partno = part->partno;
                        disk = part_to_disk(part);
                }
-               spin_unlock(&ext_devt_lock);
+               spin_unlock_bh(&ext_devt_lock);
        }
 
        return disk;
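The switch to the _bh lock variants above is presumably because ext_devt_lock can also be taken from bottom-half context: when one side of a lock runs in softirq context, the process-context side must disable bottom halves while holding it, or a softirq firing on the same CPU can spin on a lock that CPU already owns. The rule as a sketch:

	/* Process context: take the lock with BHs disabled ... */
	spin_lock_bh(&ext_devt_lock);
	/* ... touch ext_devt_idr ... */
	spin_unlock_bh(&ext_devt_lock);

	/* ... so a softirq path using plain spin_lock(&ext_devt_lock)
	 * can never interrupt and deadlock the holder on this CPU. */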
index 8aaf298a80e165f3fb5f83b1e00b8636cf9d08a3..362905e7c841ff55b204891e9e93748f2cfea96e 100644 (file)
@@ -1512,15 +1512,6 @@ config CRYPTO_USER_API_RNG
           This option enables the user-space interface for random
          number generator algorithms.
 
-config CRYPTO_USER_API_AEAD
-       tristate "User-space interface for AEAD cipher algorithms"
-       depends on NET
-       select CRYPTO_AEAD
-       select CRYPTO_USER_API
-       help
-         This option enables the user-spaces interface for AEAD
-         cipher algorithms.
-
 config CRYPTO_HASH_INFO
        bool
 
index 23716dd8a7ec3f569f82db531e1ed71bc330c7d6..5928d0746a270e7b6b2ee12a022b19ed731f03fe 100644 (file)
@@ -45,7 +45,7 @@ static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
                writel((cs->mbus_attr << 8) |
                       (dram->mbus_dram_target_id << 4) | 1,
                       hpriv->mmio + AHCI_WINDOW_CTRL(i));
-               writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i));
+               writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i));
                writel(((cs->size - 1) & 0xffff0000),
                       hpriv->mmio + AHCI_WINDOW_SIZE(i));
        }
index 80a80548ad0a80acf28c3407e6a6048825f92af3..27245957eee3cd906f546d67853d2ebd6ce54d30 100644 (file)
@@ -1053,7 +1053,7 @@ static struct of_device_id octeon_cf_match[] = {
        },
        {},
 };
-MODULE_DEVICE_TABLE(of, octeon_i2c_match);
+MODULE_DEVICE_TABLE(of, octeon_cf_match);
 
 static struct platform_driver octeon_cf_driver = {
        .probe          = octeon_cf_probe,
index 9c2ba1c97c4257016503a8ed4d2166ac19dea9c0..df0c66cb7ad3719016436dd7eb16ab1d3234568d 100644 (file)
@@ -179,7 +179,7 @@ static int detect_cache_attributes(unsigned int cpu)
 {
        int ret;
 
-       if (init_cache_level(cpu))
+       if (init_cache_level(cpu) || !cache_leaves(cpu))
                return -ENOENT;
 
        per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
index da033d3bab3c69d14e55d63c4286632905120ae2..48c0e220acc0a1b8192ca6b523ad35ab7073eba7 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/memory.h>
+#include <linux/of.h>
 
 #include "base.h"
 
@@ -34,4 +35,5 @@ void __init driver_init(void)
        cpu_dev_init();
        memory_dev_init();
        container_dev_init();
+       of_core_init();
 }
index eb1fed5bd516ffac33c850eed47fad402250c686..3ccef9eba6f9dc53cecb785c23582cbdeb3b8618 100644 (file)
@@ -406,6 +406,7 @@ config BLK_DEV_RAM_DAX
 
 config BLK_DEV_PMEM
        tristate "Persistent memory block device support"
+       depends on HAS_IOMEM
        help
          Saying Y here will allow you to use a contiguous range of reserved
          memory as one or more persistent block devices.
index 85b8036deaa3b7daaba5317ed746936a1f5183db..683dff272562b16d325df65495ad6a868cf45b14 100644 (file)
@@ -1750,6 +1750,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        struct nvme_iod *iod;
        dma_addr_t meta_dma = 0;
        void *meta = NULL;
+       void __user *metadata;
 
        if (copy_from_user(&io, uio, sizeof(io)))
                return -EFAULT;
@@ -1763,6 +1764,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
                meta_len = 0;
        }
 
+       metadata = (void __user *)(unsigned long)io.metadata;
+
        write = io.opcode & 1;
 
        switch (io.opcode) {
@@ -1786,13 +1789,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        if (meta_len) {
                meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
                                                &meta_dma, GFP_KERNEL);
+
                if (!meta) {
                        status = -ENOMEM;
                        goto unmap;
                }
                if (write) {
-                       if (copy_from_user(meta, (void __user *)io.metadata,
-                                                               meta_len)) {
+                       if (copy_from_user(meta, metadata, meta_len)) {
                                status = -EFAULT;
                                goto unmap;
                        }
@@ -1819,8 +1822,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        nvme_free_iod(dev, iod);
        if (meta) {
                if (status == NVME_SC_SUCCESS && !write) {
-                       if (copy_to_user((void __user *)io.metadata, meta,
-                                                               meta_len))
+                       if (copy_to_user(metadata, meta, meta_len))
                                status = -EFAULT;
                }
                dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
index 8dcbced0eafd5f8dc0a53dc8d8e9d4b37bad9bab..6e134f4759c0c9e98b93f221e7687004d4418342 100644 (file)
@@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram)
        memset(&zram->stats, 0, sizeof(zram->stats));
        zram->disksize = 0;
        zram->max_comp_streams = 1;
+
        set_capacity(zram->disk, 0);
+       part_stat_set_all(&zram->disk->part0, 0);
 
        up_write(&zram->init_lock);
        /* I/O operation under all of CPU are done so let's free */
index 5bd792c68f9b897ddbafe50a39c3bdb9b49306d6..ab3bde16ecb4443a2e63d8e327fa1db730294dff 100644 (file)
@@ -453,7 +453,7 @@ void __iomem *mips_cdmm_early_probe(unsigned int dev_type)
 
        /* Look for a specific device type */
        for (; drb < bus->drbs; drb += size + 1) {
-               acsr = readl(cdmm + drb * CDMM_DRB_SIZE);
+               acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
                type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
                if (type == dev_type)
                        return cdmm + drb * CDMM_DRB_SIZE;
@@ -500,7 +500,7 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
        bus->discovered = true;
        pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
        for (; drb < bus->drbs; drb += size + 1) {
-               acsr = readl(cdmm + drb * CDMM_DRB_SIZE);
+               acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
                type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
                size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
                rev  = (acsr & CDMM_ACSR_DEVREV)  >> CDMM_ACSR_DEVREV_SHIFT;
index fb9ec6221730a2d594f66d15e54471aea75cc750..6f047dcb94c22b3ea67349bf1c4b95be6b91037a 100644 (file)
@@ -58,7 +58,6 @@
 #include <linux/debugfs.h>
 #include <linux/log2.h>
 #include <linux/syscore_ops.h>
-#include <linux/memblock.h>
 
 /*
  * DDR target is the same on all platforms.
@@ -70,6 +69,7 @@
  */
 #define WIN_CTRL_OFF           0x0000
 #define   WIN_CTRL_ENABLE       BIT(0)
+/* Only on HW I/O coherency capable platforms */
 #define   WIN_CTRL_SYNCBARRIER  BIT(1)
 #define   WIN_CTRL_TGT_MASK     0xf0
 #define   WIN_CTRL_TGT_SHIFT    4
 
 /* Relative to mbusbridge_base */
 #define MBUS_BRIDGE_CTRL_OFF   0x0
-#define  MBUS_BRIDGE_SIZE_MASK  0xffff0000
 #define MBUS_BRIDGE_BASE_OFF   0x4
-#define  MBUS_BRIDGE_BASE_MASK  0xffff0000
 
 /* Maximum number of windows, for all known platforms */
 #define MBUS_WINS_MAX           20
@@ -323,8 +321,9 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
        ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
                (attr << WIN_CTRL_ATTR_SHIFT)    |
                (target << WIN_CTRL_TGT_SHIFT)   |
-               WIN_CTRL_SYNCBARRIER             |
                WIN_CTRL_ENABLE;
+       if (mbus->hw_io_coherency)
+               ctrl |= WIN_CTRL_SYNCBARRIER;
 
        writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
        writel(ctrl, addr + WIN_CTRL_OFF);
@@ -577,106 +576,36 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win)
                return MVEBU_MBUS_NO_REMAP;
 }
 
-/*
- * Use the memblock information to find the MBus bridge hole in the
- * physical address space.
- */
-static void __init
-mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
-{
-       struct memblock_region *r;
-       uint64_t s = 0;
-
-       for_each_memblock(memory, r) {
-               /*
-                * This part of the memory is above 4 GB, so we don't
-                * care for the MBus bridge hole.
-                */
-               if (r->base >= 0x100000000)
-                       continue;
-
-               /*
-                * The MBus bridge hole is at the end of the RAM under
-                * the 4 GB limit.
-                */
-               if (r->base + r->size > s)
-                       s = r->base + r->size;
-       }
-
-       *start = s;
-       *end = 0x100000000;
-}
-
 static void __init
 mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
 {
        int i;
        int cs;
-       uint64_t mbus_bridge_base, mbus_bridge_end;
 
        mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
 
-       mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end);
-
        for (i = 0, cs = 0; i < 4; i++) {
-               u64 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
-               u64 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
-               u64 end;
-               struct mbus_dram_window *w;
-
-               /* Ignore entries that are not enabled */
-               if (!(size & DDR_SIZE_ENABLED))
-                       continue;
-
-               /*
-                * Ignore entries whose base address is above 2^32,
-                * since devices cannot DMA to such high addresses
-                */
-               if (base & DDR_BASE_CS_HIGH_MASK)
-                       continue;
-
-               base = base & DDR_BASE_CS_LOW_MASK;
-               size = (size | ~DDR_SIZE_MASK) + 1;
-               end = base + size;
-
-               /*
-                * Adjust base/size of the current CS to make sure it
-                * doesn't overlap with the MBus bridge hole. This is
-                * particularly important for devices that do DMA from
-                * DRAM to a SRAM mapped in a MBus window, such as the
-                * CESA cryptographic engine.
-                */
+               u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+               u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
 
                /*
-                * The CS is fully enclosed inside the MBus bridge
-                * area, so ignore it.
+                * We only take care of entries for which the chip
+                * select is enabled, and that don't have high base
+                * address bits set (devices can only DMA to the
+                * first 4 GB of memory).
                 */
-               if (base >= mbus_bridge_base && end <= mbus_bridge_end)
-                       continue;
+               if ((size & DDR_SIZE_ENABLED) &&
+                   !(base & DDR_BASE_CS_HIGH_MASK)) {
+                       struct mbus_dram_window *w;
 
-               /*
-                * Beginning of CS overlaps with end of MBus, raise CS
-                * base address, and shrink its size.
-                */
-               if (base >= mbus_bridge_base && end > mbus_bridge_end) {
-                       size -= mbus_bridge_end - base;
-                       base = mbus_bridge_end;
+                       w = &mvebu_mbus_dram_info.cs[cs++];
+                       w->cs_index = i;
+                       w->mbus_attr = 0xf & ~(1 << i);
+                       if (mbus->hw_io_coherency)
+                               w->mbus_attr |= ATTR_HW_COHERENCY;
+                       w->base = base & DDR_BASE_CS_LOW_MASK;
+                       w->size = (size | ~DDR_SIZE_MASK) + 1;
                }
-
-               /*
-                * End of CS overlaps with beginning of MBus, shrink
-                * CS size.
-                */
-               if (base < mbus_bridge_base && end > mbus_bridge_base)
-                       size -= end - mbus_bridge_base;
-
-               w = &mvebu_mbus_dram_info.cs[cs++];
-               w->cs_index = i;
-               w->mbus_attr = 0xf & ~(1 << i);
-               if (mbus->hw_io_coherency)
-                       w->mbus_attr |= ATTR_HW_COHERENCY;
-               w->base = base;
-               w->size = size;
        }
        mvebu_mbus_dram_info.num_cs = cs;
 }
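A worked example of the size decode restored above, assuming for illustration that DDR_SIZE_MASK is 0xff000000 (the exact mask is defined earlier in this file): a size register reading 0x0f000000 decodes as (0x0f000000 | 0x00ffffff) + 1 = 0x10000000, i.e. a 256 MiB chip select.

	/* Assumed mask value; see the DDR_SIZE_MASK define in this file. */
	u32 reg  = 0x0f000000;
	u32 size = (reg | ~DDR_SIZE_MASK) + 1;	/* 0x10000000 = 256 MiB */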
index 933e4b338459284465d7970e3ff7dbf0f37314b8..7992164ea9ec2849f6ac3691629c47cda30aeb28 100644 (file)
 #define AT_XDMAC_MBR_UBC_NDV3          (0x3 << 27)     /* Next Descriptor View 3 */
 
 #define AT_XDMAC_MAX_CHAN      0x20
+#define AT_XDMAC_MAX_CSIZE     16      /* 16 data transfers */
+#define AT_XDMAC_MAX_DWIDTH    8       /* 64 bits */
 
 #define AT_XDMAC_DMA_BUSWIDTHS\
        (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@@ -192,20 +194,17 @@ struct at_xdmac_chan {
        struct dma_chan                 chan;
        void __iomem                    *ch_regs;
        u32                             mask;           /* Channel Mask */
-       u32                             cfg[2];         /* Channel Configuration Register */
-       #define AT_XDMAC_DEV_TO_MEM_CFG 0               /* Predefined dev to mem channel conf */
-       #define AT_XDMAC_MEM_TO_DEV_CFG 1               /* Predefined mem to dev channel conf */
+       u32                             cfg;            /* Channel Configuration Register */
        u8                              perid;          /* Peripheral ID */
        u8                              perif;          /* Peripheral Interface */
        u8                              memif;          /* Memory Interface */
-       u32                             per_src_addr;
-       u32                             per_dst_addr;
        u32                             save_cc;
        u32                             save_cim;
        u32                             save_cnda;
        u32                             save_cndc;
        unsigned long                   status;
        struct tasklet_struct           tasklet;
+       struct dma_slave_config         sconfig;
 
        spinlock_t                      lock;
 
@@ -415,8 +414,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
        struct at_xdmac_desc    *desc = txd_to_at_desc(tx);
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(tx->chan);
        dma_cookie_t            cookie;
+       unsigned long           irqflags;
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, irqflags);
        cookie = dma_cookie_assign(tx);
 
        dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
@@ -425,7 +425,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
        if (list_is_singular(&atchan->xfers_list))
                at_xdmac_start_xfer(atchan, desc);
 
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, irqflags);
        return cookie;
 }
 
@@ -494,61 +494,94 @@ static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
        return chan;
 }
 
+static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
+                                     enum dma_transfer_direction direction)
+{
+       struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
+       int                     csize, dwidth;
+
+       if (direction == DMA_DEV_TO_MEM) {
+               atchan->cfg =
+                       AT91_XDMAC_DT_PERID(atchan->perid)
+                       | AT_XDMAC_CC_DAM_INCREMENTED_AM
+                       | AT_XDMAC_CC_SAM_FIXED_AM
+                       | AT_XDMAC_CC_DIF(atchan->memif)
+                       | AT_XDMAC_CC_SIF(atchan->perif)
+                       | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+                       | AT_XDMAC_CC_DSYNC_PER2MEM
+                       | AT_XDMAC_CC_MBSIZE_SIXTEEN
+                       | AT_XDMAC_CC_TYPE_PER_TRAN;
+               csize = ffs(atchan->sconfig.src_maxburst) - 1;
+               if (csize < 0) {
+                       dev_err(chan2dev(chan), "invalid src maxburst value\n");
+                       return -EINVAL;
+               }
+               atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
+               dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
+               if (dwidth < 0) {
+                       dev_err(chan2dev(chan), "invalid src addr width value\n");
+                       return -EINVAL;
+               }
+               atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
+       } else if (direction == DMA_MEM_TO_DEV) {
+               atchan->cfg =
+                       AT91_XDMAC_DT_PERID(atchan->perid)
+                       | AT_XDMAC_CC_DAM_FIXED_AM
+                       | AT_XDMAC_CC_SAM_INCREMENTED_AM
+                       | AT_XDMAC_CC_DIF(atchan->perif)
+                       | AT_XDMAC_CC_SIF(atchan->memif)
+                       | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+                       | AT_XDMAC_CC_DSYNC_MEM2PER
+                       | AT_XDMAC_CC_MBSIZE_SIXTEEN
+                       | AT_XDMAC_CC_TYPE_PER_TRAN;
+               csize = ffs(atchan->sconfig.dst_maxburst) - 1;
+               if (csize < 0) {
+                       dev_err(chan2dev(chan), "invalid src maxburst value\n");
+                       return -EINVAL;
+               }
+               atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
+               dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
+               if (dwidth < 0) {
+                       dev_err(chan2dev(chan), "invalid dst addr width value\n");
+                       return -EINVAL;
+               }
+               atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
+       }
+
+       dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
+
+       return 0;
+}
+
+/*
+ * Only check that maxburst and addr width values are supported by
+ * the controller but not that the configuration is good to perform the
+ * transfer since we don't know the direction at this stage.
+ */
+static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
+{
+       if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
+           || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
+               return -EINVAL;
+
+       if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
+           || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
+               return -EINVAL;
+
+       return 0;
+}
+
 static int at_xdmac_set_slave_config(struct dma_chan *chan,
                                      struct dma_slave_config *sconfig)
 {
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
-       u8 dwidth;
-       int csize;
 
-       atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
-               AT91_XDMAC_DT_PERID(atchan->perid)
-               | AT_XDMAC_CC_DAM_INCREMENTED_AM
-               | AT_XDMAC_CC_SAM_FIXED_AM
-               | AT_XDMAC_CC_DIF(atchan->memif)
-               | AT_XDMAC_CC_SIF(atchan->perif)
-               | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
-               | AT_XDMAC_CC_DSYNC_PER2MEM
-               | AT_XDMAC_CC_MBSIZE_SIXTEEN
-               | AT_XDMAC_CC_TYPE_PER_TRAN;
-       csize = at_xdmac_csize(sconfig->src_maxburst);
-       if (csize < 0) {
-               dev_err(chan2dev(chan), "invalid src maxburst value\n");
+       if (at_xdmac_check_slave_config(sconfig)) {
+               dev_err(chan2dev(chan), "invalid slave configuration\n");
                return -EINVAL;
        }
-       atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
-       dwidth = ffs(sconfig->src_addr_width) - 1;
-       atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
-
-
-       atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
-               AT91_XDMAC_DT_PERID(atchan->perid)
-               | AT_XDMAC_CC_DAM_FIXED_AM
-               | AT_XDMAC_CC_SAM_INCREMENTED_AM
-               | AT_XDMAC_CC_DIF(atchan->perif)
-               | AT_XDMAC_CC_SIF(atchan->memif)
-               | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
-               | AT_XDMAC_CC_DSYNC_MEM2PER
-               | AT_XDMAC_CC_MBSIZE_SIXTEEN
-               | AT_XDMAC_CC_TYPE_PER_TRAN;
-       csize = at_xdmac_csize(sconfig->dst_maxburst);
-       if (csize < 0) {
-               dev_err(chan2dev(chan), "invalid src maxburst value\n");
-               return -EINVAL;
-       }
-       atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
-       dwidth = ffs(sconfig->dst_addr_width) - 1;
-       atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
-
-       /* Src and dst addr are needed to configure the link list descriptor. */
-       atchan->per_src_addr = sconfig->src_addr;
-       atchan->per_dst_addr = sconfig->dst_addr;
 
-       dev_dbg(chan2dev(chan),
-               "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
-               __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
-               atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
-               atchan->per_src_addr, atchan->per_dst_addr);
+       memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
 
        return 0;
 }
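The ffs()-based encodings above assume power-of-two values: ffs(x) returns the 1-based index of the lowest set bit, so a zero maxburst or width yields ffs(0) - 1 = -1 and is rejected with -EINVAL. A worked example for a memory-to-device channel, with assumed slave-config values:

	/* Assumed example configuration. */
	struct dma_slave_config sconfig = { };

	sconfig.dst_maxburst   = 8;	/* ffs(8) - 1 = 3  -> CSIZE  = 3 */
	sconfig.dst_addr_width = 4;	/* ffs(4) - 1 = 2  -> DWIDTH = 2 (32-bit) */

	/* Both stay within the limits enforced by
	 * at_xdmac_check_slave_config() above:
	 * AT_XDMAC_MAX_CSIZE = 16 transfers, AT_XDMAC_MAX_DWIDTH = 8 bytes. */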
@@ -563,6 +596,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        struct scatterlist      *sg;
        int                     i;
        unsigned int            xfer_size = 0;
+       unsigned long           irqflags;
+       struct dma_async_tx_descriptor  *ret = NULL;
 
        if (!sgl)
                return NULL;
@@ -578,7 +613,10 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                 flags);
 
        /* Protect dma_sconfig field that can be modified by set_slave_conf. */
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, irqflags);
+
+       if (at_xdmac_compute_chan_conf(chan, direction))
+               goto spin_unlock;
 
        /* Prepare descriptors. */
        for_each_sg(sgl, sg, sg_len, i) {
@@ -589,8 +627,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                mem = sg_dma_address(sg);
                if (unlikely(!len)) {
                        dev_err(chan2dev(chan), "sg data length is zero\n");
-                       spin_unlock_bh(&atchan->lock);
-                       return NULL;
+                       goto spin_unlock;
                }
                dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
                         __func__, i, len, mem);
@@ -600,20 +637,18 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        dev_err(chan2dev(chan), "can't get descriptor\n");
                        if (first)
                                list_splice_init(&first->descs_list, &atchan->free_descs_list);
-                       spin_unlock_bh(&atchan->lock);
-                       return NULL;
+                       goto spin_unlock;
                }
 
                /* Linked list descriptor setup. */
                if (direction == DMA_DEV_TO_MEM) {
-                       desc->lld.mbr_sa = atchan->per_src_addr;
+                       desc->lld.mbr_sa = atchan->sconfig.src_addr;
                        desc->lld.mbr_da = mem;
-                       desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
                } else {
                        desc->lld.mbr_sa = mem;
-                       desc->lld.mbr_da = atchan->per_dst_addr;
-                       desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+                       desc->lld.mbr_da = atchan->sconfig.dst_addr;
                }
+               desc->lld.mbr_cfg = atchan->cfg;
                dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
                fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
                               ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
@@ -645,13 +680,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                xfer_size += len;
        }
 
-       spin_unlock_bh(&atchan->lock);
 
        first->tx_dma_desc.flags = flags;
        first->xfer_size = xfer_size;
        first->direction = direction;
+       ret = &first->tx_dma_desc;
 
-       return &first->tx_dma_desc;
+spin_unlock:
+       spin_unlock_irqrestore(&atchan->lock, irqflags);
+       return ret;
 }
 
 static struct dma_async_tx_descriptor *
@@ -664,6 +701,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
        struct at_xdmac_desc    *first = NULL, *prev = NULL;
        unsigned int            periods = buf_len / period_len;
        int                     i;
+       unsigned long           irqflags;
 
        dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
                __func__, &buf_addr, buf_len, period_len,
@@ -679,32 +717,34 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
                return NULL;
        }
 
+       if (at_xdmac_compute_chan_conf(chan, direction))
+               return NULL;
+
        for (i = 0; i < periods; i++) {
                struct at_xdmac_desc    *desc = NULL;
 
-               spin_lock_bh(&atchan->lock);
+               spin_lock_irqsave(&atchan->lock, irqflags);
                desc = at_xdmac_get_desc(atchan);
                if (!desc) {
                        dev_err(chan2dev(chan), "can't get descriptor\n");
                        if (first)
                                list_splice_init(&first->descs_list, &atchan->free_descs_list);
-                       spin_unlock_bh(&atchan->lock);
+                       spin_unlock_irqrestore(&atchan->lock, irqflags);
                        return NULL;
                }
-               spin_unlock_bh(&atchan->lock);
+               spin_unlock_irqrestore(&atchan->lock, irqflags);
                dev_dbg(chan2dev(chan),
                        "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
                        __func__, desc, &desc->tx_dma_desc.phys);
 
                if (direction == DMA_DEV_TO_MEM) {
-                       desc->lld.mbr_sa = atchan->per_src_addr;
+                       desc->lld.mbr_sa = atchan->sconfig.src_addr;
                        desc->lld.mbr_da = buf_addr + i * period_len;
-                       desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
                } else {
                        desc->lld.mbr_sa = buf_addr + i * period_len;
-                       desc->lld.mbr_da = atchan->per_dst_addr;
-                       desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+                       desc->lld.mbr_da = atchan->sconfig.dst_addr;
                }
+               desc->lld.mbr_cfg = atchan->cfg;
                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
                        | AT_XDMAC_MBR_UBC_NDEN
                        | AT_XDMAC_MBR_UBC_NSEN
@@ -766,6 +806,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                                        | AT_XDMAC_CC_SIF(0)
                                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
                                        | AT_XDMAC_CC_TYPE_MEM_TRAN;
+       unsigned long           irqflags;
 
        dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
                __func__, &src, &dest, len, flags);
@@ -798,9 +839,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
                dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
 
-               spin_lock_bh(&atchan->lock);
+               spin_lock_irqsave(&atchan->lock, irqflags);
                desc = at_xdmac_get_desc(atchan);
-               spin_unlock_bh(&atchan->lock);
+               spin_unlock_irqrestore(&atchan->lock, irqflags);
                if (!desc) {
                        dev_err(chan2dev(chan), "can't get descriptor\n");
                        if (first)
@@ -886,6 +927,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        int                     residue;
        u32                     cur_nda, mask, value;
        u8                      dwidth = 0;
+       unsigned long           flags;
 
        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
@@ -894,7 +936,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        if (!txstate)
                return ret;
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
 
        desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
 
@@ -904,8 +946,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
         */
        if (!desc->active_xfer) {
                dma_set_residue(txstate, desc->xfer_size);
-               spin_unlock_bh(&atchan->lock);
-               return ret;
+               goto spin_unlock;
        }
 
        residue = desc->xfer_size;
@@ -936,14 +977,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        }
        residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
 
-       spin_unlock_bh(&atchan->lock);
-
        dma_set_residue(txstate, residue);
 
        dev_dbg(chan2dev(chan),
                 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
                 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
 
+spin_unlock:
+       spin_unlock_irqrestore(&atchan->lock, flags);
        return ret;
 }
 
@@ -964,8 +1005,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
        struct at_xdmac_desc    *desc;
+       unsigned long           flags;
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
 
        /*
         * If channel is enabled, do nothing, advance_work will be triggered
@@ -980,7 +1022,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
                        at_xdmac_start_xfer(atchan, desc);
        }
 
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1116,12 +1158,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
 {
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        int ret;
+       unsigned long           flags;
 
        dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
        ret = at_xdmac_set_slave_config(chan, config);
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 
        return ret;
 }
@@ -1130,18 +1173,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
 {
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
+       unsigned long           flags;
 
        dev_dbg(chan2dev(chan), "%s\n", __func__);
 
        if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
                return 0;
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
        at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
        while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
               & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
                cpu_relax();
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 
        return 0;
 }
@@ -1150,18 +1194,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
 {
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
+       unsigned long           flags;
 
        dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
        if (!at_xdmac_chan_is_paused(atchan)) {
-               spin_unlock_bh(&atchan->lock);
+               spin_unlock_irqrestore(&atchan->lock, flags);
                return 0;
        }
 
        at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
        clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 
        return 0;
 }
@@ -1171,10 +1216,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
        struct at_xdmac_desc    *desc, *_desc;
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
+       unsigned long           flags;
 
        dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
        at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
        while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
                cpu_relax();
@@ -1184,7 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
                at_xdmac_remove_xfer(atchan, desc);
 
        clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 
        return 0;
 }
@@ -1194,8 +1240,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac_desc    *desc;
        int                     i;
+       unsigned long           flags;
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
 
        if (at_xdmac_chan_is_enabled(atchan)) {
                dev_err(chan2dev(chan),
@@ -1226,7 +1273,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
        dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
 
 spin_unlock:
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
        return i;
 }
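The file-wide move from spin_lock_bh() to spin_lock_irqsave() makes the channel lock safe to take from any context, including hard-irq: the irqsave variant records whether interrupts were already disabled and restores exactly that state, instead of unconditionally re-enabling. The idiom:

	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);	/* safe in any context */
	/* ... critical section ... */
	spin_unlock_irqrestore(&atchan->lock, flags);	/* restore prior IRQ state */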
 
index 2890d744bb1bb902cc095fb87841c492cef7542c..3ddfd1f6c23c0f0f891ed11d6f68cbcaaa3c6e03 100644 (file)
@@ -487,7 +487,11 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
        caps->directions = device->directions;
        caps->residue_granularity = device->residue_granularity;
 
-       caps->cmd_pause = !!device->device_pause;
+       /*
+        * Some devices implement only pause (e.g. to read the residue) but no
+        * resume. However, cmd_pause is advertised as pause AND resume.
+        */
+       caps->cmd_pause = !!(device->device_pause && device->device_resume);
        caps->cmd_terminate = !!device->device_terminate_all;
 
        return 0;
index 9b84def7a35373a45cf18efc9d53939bcc86baf0..f42f71e37e73767a55078aef4c81bcd238b02db1 100644 (file)
@@ -384,7 +384,10 @@ static int hsu_dma_terminate_all(struct dma_chan *chan)
        spin_lock_irqsave(&hsuc->vchan.lock, flags);
 
        hsu_dma_stop_channel(hsuc);
-       hsuc->desc = NULL;
+       if (hsuc->desc) {
+               hsu_dma_desc_free(&hsuc->desc->vdesc);
+               hsuc->desc = NULL;
+       }
 
        vchan_get_all_descriptors(&hsuc->vchan, &head);
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
index a7d9d3029b145dfa29babeee33022bc1f7354d52..340f9e607cd8b90dfe75add027c18bd26d67f1e0 100644 (file)
@@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
        struct pl330_dmac *pl330 = pch->dmac;
        LIST_HEAD(list);
 
+       pm_runtime_get_sync(pl330->ddma.dev);
        spin_lock_irqsave(&pch->lock, flags);
        spin_lock(&pl330->lock);
        _stop(pch->thread);
@@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
        list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
        list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
        spin_unlock_irqrestore(&pch->lock, flags);
+       pm_runtime_mark_last_busy(pl330->ddma.dev);
+       pm_runtime_put_autosuspend(pl330->ddma.dev);
 
        return 0;
 }
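
terminate_all can run while the controller is runtime-suspended, so the register pokes are now bracketed by a runtime-PM reference. The generic shape, assuming an autosuspend-enabled device:

    pm_runtime_get_sync(dev);        /* resume hardware and hold a reference */
    /* ... registers are safe to touch here ... */
    pm_runtime_mark_last_busy(dev);  /* restart the autosuspend timer */
    pm_runtime_put_autosuspend(dev); /* drop ref; suspend after the delay */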
index 071c2c969eec06ad929ecfb871c614297a615e9e..72791232e46ba44ff474cf7dadff5ccb433d7348 100644 (file)
@@ -186,8 +186,20 @@ struct ibft_kobject {
 
 static struct iscsi_boot_kset *boot_kset;
 
+/* all-zero (absent) address */
 static const char nulls[16];
 
+/* IPv4-mapped IPv6 ::ffff:0.0.0.0 */
+static const char mapped_nulls[16] = { 0x00, 0x00, 0x00, 0x00,
+                                       0x00, 0x00, 0x00, 0x00,
+                                       0x00, 0x00, 0xff, 0xff,
+                                       0x00, 0x00, 0x00, 0x00 };
+
+static int address_not_null(u8 *ip)
+{
+       return (memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16));
+}
+
 /*
  * Helper functions to parse data properly.
  */
@@ -445,7 +457,7 @@ static umode_t ibft_check_nic_for(void *data, int type)
                rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_IP_ADDR:
-               if (memcmp(nic->ip_addr, nulls, sizeof(nic->ip_addr)))
+               if (address_not_null(nic->ip_addr))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_SUBNET_MASK:
@@ -456,21 +468,19 @@ static umode_t ibft_check_nic_for(void *data, int type)
                rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_GATEWAY:
-               if (memcmp(nic->gateway, nulls, sizeof(nic->gateway)))
+               if (address_not_null(nic->gateway))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_PRIMARY_DNS:
-               if (memcmp(nic->primary_dns, nulls,
-                          sizeof(nic->primary_dns)))
+               if (address_not_null(nic->primary_dns))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_SECONDARY_DNS:
-               if (memcmp(nic->secondary_dns, nulls,
-                          sizeof(nic->secondary_dns)))
+               if (address_not_null(nic->secondary_dns))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_DHCP:
-               if (memcmp(nic->dhcp, nulls, sizeof(nic->dhcp)))
+               if (address_not_null(nic->dhcp))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_VLAN:
@@ -536,23 +546,19 @@ static umode_t __init ibft_check_initiator_for(void *data, int type)
                rc = S_IRUGO;
                break;
        case ISCSI_BOOT_INI_ISNS_SERVER:
-               if (memcmp(init->isns_server, nulls,
-                          sizeof(init->isns_server)))
+               if (address_not_null(init->isns_server))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_INI_SLP_SERVER:
-               if (memcmp(init->slp_server, nulls,
-                          sizeof(init->slp_server)))
+               if (address_not_null(init->slp_server))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
-               if (memcmp(init->pri_radius_server, nulls,
-                          sizeof(init->pri_radius_server)))
+               if (address_not_null(init->pri_radius_server))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
-               if (memcmp(init->sec_radius_server, nulls,
-                          sizeof(init->sec_radius_server)))
+               if (address_not_null(init->sec_radius_server))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_INI_INITIATOR_NAME:
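
Some firmware reports an unset address as the IPv4-mapped form ::ffff:0.0.0.0 rather than all zeroes, so both encodings must be treated as "no address". A standalone sketch of the same check for reference:

    #include <string.h>

    static const unsigned char nulls[16];                   /* :: */
    static const unsigned char mapped_nulls[16] = {         /* ::ffff:0.0.0.0 */
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0, 0, 0, 0
    };

    static int address_not_null(const unsigned char *ip)
    {
            return memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16);
    }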
index 6b8115f342085bb3b25f78ad8c13ed6ce10a9d7b..83f281dda1e0f41fc4be3c8d2cb4e02407ab837d 100644 (file)
@@ -117,7 +117,7 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
                = container_of(chip, struct kempld_gpio_data, chip);
        struct kempld_device_data *pld = gpio->pld;
 
-       return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
+       return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
 }
 
 static int kempld_gpio_pincount(struct kempld_device_data *pld)
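
A hedged reading of the one-character fix: gpiolib expects get_direction() to return 1 for input (GPIOF_DIR_IN) and 0 for output, while the PLD's DIR bit is evidently set for outputs, so the raw bit must be inverted. Sketch with a hypothetical read_dir_bit() helper:

    static int my_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
    {
            /* hardware DIR bit: 1 = output; gpiolib wants 1 = input */
            return !read_dir_bit(chip, offset);
    }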
index 59eaa23767d8dca5bddf740fa90a24c9699c4a0b..6bc612b8a49fcf859261173e00d0e7389d7d2b05 100644 (file)
@@ -53,6 +53,11 @@ static DEFINE_MUTEX(gpio_lookup_lock);
 static LIST_HEAD(gpio_lookup_list);
 LIST_HEAD(gpio_chips);
 
+
+static void gpiochip_free_hogs(struct gpio_chip *chip);
+static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+
+
 static inline void desc_set_label(struct gpio_desc *d, const char *label)
 {
        d->label = label;
@@ -297,6 +302,7 @@ int gpiochip_add(struct gpio_chip *chip)
 
 err_remove_chip:
        acpi_gpiochip_remove(chip);
+       gpiochip_free_hogs(chip);
        of_gpiochip_remove(chip);
        spin_lock_irqsave(&gpio_lock, flags);
        list_del(&chip->list);
@@ -313,10 +319,6 @@ err_free_descs:
 }
 EXPORT_SYMBOL_GPL(gpiochip_add);
 
-/* Forward-declaration */
-static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
-static void gpiochip_free_hogs(struct gpio_chip *chip);
-
 /**
  * gpiochip_remove() - unregister a gpio_chip
  * @chip: the chip to unregister
index e469c4b2e8cc85981e3ba99cae0b28c1b7a9b2ec..c25728bc388a2be7134cb3e6b895a7a39d4189a2 100644 (file)
@@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                        dev->node_props.cpu_core_id_base);
        sysfs_show_32bit_prop(buffer, "simd_id_base",
                        dev->node_props.simd_id_base);
-       sysfs_show_32bit_prop(buffer, "capability",
-                       dev->node_props.capability);
        sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
                        dev->node_props.max_waves_per_simd);
        sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
@@ -736,6 +734,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                        dev->gpu->kfd2kgd->get_fw_version(
                                                dev->gpu->kgd,
                                                KGD_ENGINE_MEC1));
+               sysfs_show_32bit_prop(buffer, "capability",
+                               dev->node_props.capability);
        }
 
        return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
index 40c1db9ad7c3fac84365cc331ec67661e902abb5..2f0ed11024eb8322676e000066a238e55b0dcb9b 100644 (file)
@@ -465,6 +465,9 @@ int drm_plane_helper_commit(struct drm_plane *plane,
                if (!crtc[i])
                        continue;
 
+               if (crtc[i]->cursor == plane)
+                       continue;
+
                /* There's no other way to figure out whether the crtc is running. */
                ret = drm_crtc_vblank_get(crtc[i]);
                if (ret == 0) {
index ffc305fc20768c29af6883eeb2d70553839cfa6e..eb7e61078a5b6f1088489b49b42b32abe8ffca42 100644 (file)
@@ -217,7 +217,7 @@ static ssize_t status_store(struct device *device,
 
        mutex_unlock(&dev->mode_config.mutex);
 
-       return ret;
+       return ret ? ret : count;
 }
 
 static ssize_t status_show(struct device *device,
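
A sysfs ->store() callback must return the number of bytes consumed on success; returning 0 makes userspace's write() report zero bytes written, and callers typically loop retrying forever. The fixed shape (my_apply is a hypothetical helper):

    static ssize_t my_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
    {
            int ret = my_apply(dev, buf);

            return ret ? ret : count;    /* negative errno, or bytes used */
    }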
index 007c7d7d82950f597bb05ef8388fb1696ef72b38..dc55c51964ab501720f02ae682118ce12a51f0ff 100644 (file)
@@ -1667,12 +1667,15 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 
        if (HAS_PCH_SPLIT(dev))
                sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
-       else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
+       else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
+                IS_I945G(dev) || IS_I945GM(dev))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+       else if (IS_VALLEYVIEW(dev))
+               sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 
        intel_runtime_pm_put(dev_priv);
 
index 53394f998a1f9429f87b78598a69e232a48d5b38..851b585987f9aebeaff662844f5d5d356bbc5e6f 100644 (file)
@@ -2656,9 +2656,6 @@ void i915_gem_reset(struct drm_device *dev)
 void
 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 {
-       if (list_empty(&ring->request_list))
-               return;
-
        WARN_ON(i915_verify_lists(ring->dev));
 
        /* Retire requests first as we use it above for the early return.
index f27346e907b1e9e4cb1d4d3eda9b33cca2f63033..d714a4b5711e4e7fa390ec6b659d2683ef41f585 100644 (file)
@@ -880,10 +880,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                                      DP_AUX_CH_CTL_RECEIVE_ERROR))
                                continue;
                        if (status & DP_AUX_CH_CTL_DONE)
-                               break;
+                               goto done;
                }
-               if (status & DP_AUX_CH_CTL_DONE)
-                       break;
        }
 
        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
@@ -892,6 +890,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                goto out;
        }
 
+done:
        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected
         */
index 56e437e3158021a09641d188affc6129f0b1eda8..ae628001fd97873b67f99fb0128167858948afe6 100644 (file)
@@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
                                               struct intel_gmbus,
                                               adapter);
        struct drm_i915_private *dev_priv = bus->dev_priv;
-       int i, reg_offset;
+       int i = 0, inc, try = 0, reg_offset;
        int ret = 0;
 
        intel_aux_display_runtime_get(dev_priv);
@@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
 
        reg_offset = dev_priv->gpio_mmio_base;
 
+retry:
        I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
 
-       for (i = 0; i < num; i++) {
+       for (; i < num; i += inc) {
+               inc = 1;
                if (gmbus_is_index_read(msgs, i, num)) {
                        ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
-                       i += 1;  /* set i to the index of the read xfer */
+                       inc = 2; /* an index read is two msgs */
                } else if (msgs[i].flags & I2C_M_RD) {
                        ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
                } else {
@@ -525,6 +527,18 @@ clear_err:
                         adapter->name, msgs[i].addr,
                         (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
+       /*
+        * Passive adapters sometimes NAK the first probe. Retry the first
+        * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
+        * has retries internally. See also the retry loop in
+        * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
+        */
+       if (ret == -ENXIO && i == 0 && try++ == 0) {
+               DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
+                             adapter->name);
+               goto retry;
+       }
+
        goto out;
 
 timeout:
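
The retry only triggers when the very first message NAKs (i == 0) and only once (try++ == 0), so completed transfers are never replayed. Reduced to its skeleton (do_xfer is hypothetical):

    int i = 0, try = 0, ret;

    retry:
    ret = do_xfer(&msgs[i]);
    if (ret == -ENXIO && i == 0 && try++ == 0)
            goto retry;              /* one retry of the first probe only */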
index 09df74b8e917b1dac90d460be50d1c4c5152881c..424e6219778712dcaf0e7c5c1ae7c51709fce6ba 100644 (file)
@@ -1134,6 +1134,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
        I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
        I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
 
+       if (ring->status_page.obj) {
+               I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+                          (u32)ring->status_page.gfx_addr);
+               POSTING_READ(RING_HWS_PGA(ring->mmio_base));
+       }
+
        I915_WRITE(RING_MODE_GEN7(ring),
                   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
                   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
index 441e2502b88946ff2d7455a9f26cc32faa87d8fc..005b5e04de4d74d13eee87af223c9e22687f6d35 100644 (file)
@@ -901,13 +901,6 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
                            GEN6_WIZ_HASHING_MASK,
                            GEN6_WIZ_HASHING_16x4);
 
-       if (INTEL_REVID(dev) == SKL_REVID_C0 ||
-           INTEL_REVID(dev) == SKL_REVID_D0)
-               /* WaBarrierPerformanceFixDisable:skl */
-               WA_SET_BIT_MASKED(HDC_CHICKEN0,
-                                 HDC_FENCE_DEST_SLM_DISABLE |
-                                 HDC_BARRIER_PERFORMANCE_DISABLE);
-
        return 0;
 }
 
@@ -1024,6 +1017,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
                WA_SET_BIT_MASKED(HIZ_CHICKEN,
                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
+       if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+           INTEL_REVID(dev) == SKL_REVID_D0)
+               /* WaBarrierPerformanceFixDisable:skl */
+               WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                                 HDC_FENCE_DEST_SLM_DISABLE |
+                                 HDC_BARRIER_PERFORMANCE_DISABLE);
+
        return skl_tune_iz_hashing(ring);
 }
 
index e87d2f418de4f381d50471494e5fe8050de4bdb2..987b81f31b0e693cfe7d505b2f66eecc7eac6539 100644 (file)
@@ -2550,7 +2550,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
 
        DRM_DEBUG_KMS("initialising analog device %d\n", device);
 
-       intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+       intel_sdvo_connector = intel_sdvo_connector_alloc();
        if (!intel_sdvo_connector)
                return false;
 
index 0b5af0fe86598c613f923b2a74e1a81e065dbd89..64f8b2f687d29bb206ae1fe461706fbf1be3fe12 100644 (file)
@@ -14,7 +14,7 @@
 
 #define FERMI_TWOD_A                                                 0x0000902d
 
-#define FERMI_MEMORY_TO_MEMORY_FORMAT_A                              0x0000903d
+#define FERMI_MEMORY_TO_MEMORY_FORMAT_A                              0x00009039
 
 #define KEPLER_INLINE_TO_MEMORY_A                                    0x0000a040
 #define KEPLER_INLINE_TO_MEMORY_B                                    0x0000a140
index 2f5eadd12a9b611b5d6c790d1ea8bd39a7606736..fdb1dcf16a595ad6adabb27fd1db86c7b4797b9b 100644 (file)
@@ -329,7 +329,6 @@ gm204_gr_init(struct nvkm_object *object)
        nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
 
        for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-       printk(KERN_ERR "ppc %d %d\n", gpc, priv->ppc_nr[gpc]);
                for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++)
                        nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
                nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
index e8778c67578ee41bcc490eca56b544137ea1ef53..c61102f708055ecf739077e1a1436dcd12ff70cb 100644 (file)
@@ -90,12 +90,14 @@ gf100_devinit_disable(struct nvkm_devinit *devinit)
        return disable;
 }
 
-static int
+int
 gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                   struct nvkm_oclass *oclass, void *data, u32 size,
                   struct nvkm_object **pobject)
 {
+       struct nvkm_devinit_impl *impl = (void *)oclass;
        struct nv50_devinit_priv *priv;
+       u64 disable;
        int ret;
 
        ret = nvkm_devinit_create(parent, engine, oclass, &priv);
@@ -103,7 +105,8 @@ gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
-       if (nv_rd32(priv, 0x022500) & 0x00000001)
+       disable = impl->disable(&priv->base);
+       if (disable & (1ULL << NVDEV_ENGINE_DISP))
                priv->base.post = true;
 
        return 0;
index b345a53e881dc6e0f84fa188dfb32d50c90a675e..87ca0ece37b4209114ed36fb8260907e922d4ba4 100644 (file)
@@ -48,7 +48,7 @@ struct nvkm_oclass *
 gm107_devinit_oclass = &(struct nvkm_devinit_impl) {
        .base.handle = NV_SUBDEV(DEVINIT, 0x07),
        .base.ofuncs = &(struct nvkm_ofuncs) {
-               .ctor = nv50_devinit_ctor,
+               .ctor = gf100_devinit_ctor,
                .dtor = _nvkm_devinit_dtor,
                .init = nv50_devinit_init,
                .fini = _nvkm_devinit_fini,
index 535172c5f1ad0fda328eff542db8024afac00c02..1076fcf0d71614e89cf279234da7412557dc17ed 100644 (file)
@@ -161,7 +161,7 @@ struct nvkm_oclass *
 gm204_devinit_oclass = &(struct nvkm_devinit_impl) {
        .base.handle = NV_SUBDEV(DEVINIT, 0x07),
        .base.ofuncs = &(struct nvkm_ofuncs) {
-               .ctor = nv50_devinit_ctor,
+               .ctor = gf100_devinit_ctor,
                .dtor = _nvkm_devinit_dtor,
                .init = nv50_devinit_init,
                .fini = _nvkm_devinit_fini,
index b882b65ff3cd2031ae6e9ccf18b1987b972846d3..9243521c80ac22de306f8b1c7113260765ebd12a 100644 (file)
@@ -15,6 +15,9 @@ int  nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
 int  gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
+int  gf100_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
+                       struct nvkm_oclass *, void *, u32,
+                       struct nvkm_object **);
 int  gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
 u64  gm107_devinit_disable(struct nvkm_devinit *);
index 42b2ea3fdcf3584680235e6d5cab14942f7f84b1..dac78ad24b31558aa53d917fb802865b6a122b61 100644 (file)
@@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                else
                        radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
-               /* if there is no audio, set MINM_OVER_MAXP  */
-               if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
-                       radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
                if (rdev->family < CHIP_RV770)
                        radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
                /* use frac fb div on APUs */
index a0c35bbc85462587be95048bd996aecb8ec481e2..ba50f3c1c2e0332024959c46ebb99a877e7893b1 100644 (file)
@@ -5822,7 +5822,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
               L2_CACHE_BIGK_FRAGMENT_SIZE(4));
        /* setup context0 */
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(rdev->dummy_page.addr >> 12));
index f04205170b8a5942d73437ada72437bc18d028a8..cfa3a84a2af03c100741cb7e5b352781adf60b00 100644 (file)
@@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
 
-       WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+       WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset,
                HDMI0_ACR_SOURCE |              /* select SW CTS value */
                HDMI0_ACR_AUTO_SEND);   /* allow hw to send ACR packets when required */
 
index 05e6d6ef596385ecab450bbd1831e942186021d2..f848acfd3fc8a94fb4674cf13d9442857e367567 100644 (file)
@@ -2485,7 +2485,7 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
index 0926739c9fa7c40d17dd09bb318cc19c4ca194c1..9953356fe2637cfacdc2ba41e8ecd082d65213ff 100644 (file)
@@ -400,7 +400,7 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
        if (enable) {
                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
-               if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+               if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
                        WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
                               HDMI_AVI_INFO_SEND | /* enable AVI info frames */
                               HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
@@ -438,7 +438,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
        if (!dig || !dig->afmt)
                return;
 
-       if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+       if (enable && connector &&
+           drm_detect_monitor_audio(radeon_connector_edid(connector))) {
                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                struct radeon_connector_atom_dig *dig_connector;
index aba2f428c0a895380a4a3251e51c6484fba4b3be..64d3a771920db8a57a04cd343ba210d8ee2fefc7 100644 (file)
@@ -1282,7 +1282,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
               L2_CACHE_BIGK_FRAGMENT_SIZE(6));
        /* setup context0 */
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(rdev->dummy_page.addr >> 12));
index 25b4ac967742c034372caa1dbf67d6476fd6bb92..8f6d862a188228101dc9070f5ab2ada5f819d1a6 100644 (file)
@@ -1112,7 +1112,7 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
index dcb779647c570cea3fd0a29fbae41be53dcbb7df..25191f126f3bb63dffc5f812edf0b9fec335600b 100644 (file)
@@ -460,9 +460,6 @@ void radeon_audio_detect(struct drm_connector *connector,
        if (!connector || !connector->encoder)
                return;
 
-       if (!radeon_encoder_is_digital(connector->encoder))
-               return;
-
        rdev = connector->encoder->dev->dev_private;
 
        if (!radeon_audio_chipset_supported(rdev))
@@ -471,26 +468,26 @@ void radeon_audio_detect(struct drm_connector *connector,
        radeon_encoder = to_radeon_encoder(connector->encoder);
        dig = radeon_encoder->enc_priv;
 
-       if (!dig->afmt)
-               return;
-
        if (status == connector_status_connected) {
-               struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+               struct radeon_connector *radeon_connector;
+               int sink_type;
+
+               if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+                       radeon_encoder->audio = NULL;
+                       return;
+               }
+
+               radeon_connector = to_radeon_connector(connector);
+               sink_type = radeon_dp_getsinktype(radeon_connector);
 
                if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
-                   radeon_dp_getsinktype(radeon_connector) ==
-                   CONNECTOR_OBJECT_ID_DISPLAYPORT)
+                       sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
                        radeon_encoder->audio = rdev->audio.dp_funcs;
                else
                        radeon_encoder->audio = rdev->audio.hdmi_funcs;
 
                dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
-               if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-                       radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
-               } else {
-                       radeon_audio_enable(rdev, dig->afmt->pin, 0);
-                       dig->afmt->pin = NULL;
-               }
+               radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
        } else {
                radeon_audio_enable(rdev, dig->afmt->pin, 0);
                dig->afmt->pin = NULL;
index d17d251dbd4fe5a1ac99a238fd7aebcf552682ba..cebb65e07e1d13f0bee01ee753f4c8a76e0e22d5 100644 (file)
@@ -1379,10 +1379,8 @@ out:
        /* updated in get modes as well since we need to know if it's analog or digital */
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0) {
-               radeon_connector_get_edid(connector);
+       if (radeon_audio != 0)
                radeon_audio_detect(connector, ret);
-       }
 
 exit:
        pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1719,10 +1717,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0) {
-               radeon_connector_get_edid(connector);
+       if (radeon_audio != 0)
                radeon_audio_detect(connector, ret);
-       }
 
 out:
        pm_runtime_mark_last_busy(connector->dev->dev);
index b7ca4c51462120fab3ab146dd74f653e8bcb91cb..a7fdfa4f0857b3a416e67d79007a1da731455b80 100644 (file)
@@ -1463,6 +1463,21 @@ int radeon_device_init(struct radeon_device *rdev,
        if (r)
                DRM_ERROR("ib ring test failed (%d).\n", r);
 
+       /*
+        * Turks/Thames GPUs will freeze the whole laptop if DPM is not
+        * restarted after the CP ring has chewed through at least one packet.
+        * Hence we stop and restart DPM here, after radeon_ib_ring_tests().
+        */
+       if (rdev->pm.dpm_enabled &&
+           (rdev->pm.pm_method == PM_METHOD_DPM) &&
+           (rdev->family == CHIP_TURKS) &&
+           (rdev->flags & RADEON_IS_MOBILITY)) {
+               mutex_lock(&rdev->pm.mutex);
+               radeon_dpm_disable(rdev);
+               radeon_dpm_enable(rdev);
+               mutex_unlock(&rdev->pm.mutex);
+       }
+
        if ((radeon_testing & 1)) {
                if (rdev->accel_working)
                        radeon_test_moves(rdev);
index de42fc4a22b869296ff44c85c859678c6155ddd7..9c3377ca17b75ecd2092e4fd78a2238c126d88f1 100644 (file)
@@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                /* make sure object fit at this offset */
                eoffset = soffset + size;
                if (soffset >= eoffset) {
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto error_unreserve;
                }
 
                last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
                if (last_pfn > rdev->vm_manager.max_pfn) {
                        dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
                                last_pfn, rdev->vm_manager.max_pfn);
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto error_unreserve;
                }
 
        } else {
@@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                                "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
                                soffset, tmp->bo, tmp->it.start, tmp->it.last);
                        mutex_unlock(&vm->mutex);
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto error_unreserve;
                }
        }
 
@@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                        tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
                        if (!tmp) {
                                mutex_unlock(&vm->mutex);
-                               return -ENOMEM;
+                               r = -ENOMEM;
+                               goto error_unreserve;
                        }
                        tmp->it.start = bo_va->it.start;
                        tmp->it.last = bo_va->it.last;
@@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                r = radeon_vm_clear_bo(rdev, pt);
                if (r) {
                        radeon_bo_unref(&pt);
-                       radeon_bo_reserve(bo_va->bo, false);
                        return r;
                }
 
@@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 
        mutex_unlock(&vm->mutex);
        return 0;
+
+error_unreserve:
+       radeon_bo_unreserve(bo_va->bo);
+       return r;
 }
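
All failure paths in radeon_vm_bo_set_addr() now funnel through a single error_unreserve label, so the buffer-object reservation taken earlier is dropped exactly once on every exit. The canonical kernel unwind shape this restores:

    if (soffset >= eoffset) {
            r = -EINVAL;
            goto error_unreserve;    /* single exit for every failure */
    }
    /* ... further checks and setup ... */
    return 0;

    error_unreserve:
    radeon_bo_unreserve(bo_va->bo);  /* balance the earlier reserve */
    return r;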
 
 /**
index c54d6313a46d243a226b5d969b597dcb062773ff..01ee96acb3985ea65ec2470456381251d51f444e 100644 (file)
@@ -921,7 +921,7 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
index 5326f753e10760ec04e27701e2eef4674139494f..4c679b802bc851db50450ee759c3d3f314d0bfdf 100644 (file)
@@ -4303,7 +4303,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
               L2_CACHE_BIGK_FRAGMENT_SIZE(4));
        /* setup context0 */
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(rdev->dummy_page.addr >> 12));
index 1055cb79096c6b508d568fd3c07255b05bf8b56c..3f4c7b8420287188288641e66b9f82addd5b21f3 100644 (file)
@@ -1,4 +1,4 @@
 ccflags-y := -Iinclude/drm
-vgem-y := vgem_drv.o vgem_dma_buf.o
+vgem-y := vgem_drv.o
 
 obj-$(CONFIG_DRM_VGEM) += vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_dma_buf.c b/drivers/gpu/drm/vgem/vgem_dma_buf.c
deleted file mode 100644 (file)
index 0254438..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright © 2012 Intel Corporation
- * Copyright © 2014 The Chromium OS Authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Ben Widawsky <ben@bwidawsk.net>
- *
- */
-
-#include <linux/dma-buf.h>
-#include "vgem_drv.h"
-
-struct sg_table *vgem_gem_prime_get_sg_table(struct drm_gem_object *gobj)
-{
-       struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
-       BUG_ON(obj->pages == NULL);
-
-       return drm_prime_pages_to_sg(obj->pages, obj->base.size / PAGE_SIZE);
-}
-
-int vgem_gem_prime_pin(struct drm_gem_object *gobj)
-{
-       struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
-       return vgem_gem_get_pages(obj);
-}
-
-void vgem_gem_prime_unpin(struct drm_gem_object *gobj)
-{
-       struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
-       vgem_gem_put_pages(obj);
-}
-
-void *vgem_gem_prime_vmap(struct drm_gem_object *gobj)
-{
-       struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
-       BUG_ON(obj->pages == NULL);
-
-       return vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
-}
-
-void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
-       vunmap(vaddr);
-}
-
-struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
-                                            struct dma_buf *dma_buf)
-{
-       struct drm_vgem_gem_object *obj = NULL;
-       int ret;
-
-       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-       if (obj == NULL) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       ret = drm_gem_object_init(dev, &obj->base, dma_buf->size);
-       if (ret) {
-               ret = -ENOMEM;
-               goto fail_free;
-       }
-
-       get_dma_buf(dma_buf);
-
-       obj->base.dma_buf = dma_buf;
-       obj->use_dma_buf = true;
-
-       return &obj->base;
-
-fail_free:
-       kfree(obj);
-fail:
-       return ERR_PTR(ret);
-}
index cb3b43525b2de3e512c2000dec12b1e92c923e2c..7a207ca547be24011fc0b89284ed7e728ded26f8 100644 (file)
@@ -302,22 +302,13 @@ static const struct file_operations vgem_driver_fops = {
 };
 
 static struct drm_driver vgem_driver = {
-       .driver_features                = DRIVER_GEM | DRIVER_PRIME,
+       .driver_features                = DRIVER_GEM,
        .gem_free_object                = vgem_gem_free_object,
        .gem_vm_ops                     = &vgem_gem_vm_ops,
        .ioctls                         = vgem_ioctls,
        .fops                           = &vgem_driver_fops,
        .dumb_create                    = vgem_gem_dumb_create,
        .dumb_map_offset                = vgem_gem_dumb_map,
-       .prime_handle_to_fd             = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle             = drm_gem_prime_fd_to_handle,
-       .gem_prime_export               = drm_gem_prime_export,
-       .gem_prime_import               = vgem_gem_prime_import,
-       .gem_prime_pin                  = vgem_gem_prime_pin,
-       .gem_prime_unpin                = vgem_gem_prime_unpin,
-       .gem_prime_get_sg_table         = vgem_gem_prime_get_sg_table,
-       .gem_prime_vmap                 = vgem_gem_prime_vmap,
-       .gem_prime_vunmap               = vgem_gem_prime_vunmap,
        .name   = DRIVER_NAME,
        .desc   = DRIVER_DESC,
        .date   = DRIVER_DATE,
index 57ab4d8f41f92b083299d786d62f0ebca00577a1..e9f92f7ee275cf791b8f81d8a53c1a8f3c45ba8e 100644 (file)
@@ -43,15 +43,4 @@ struct drm_vgem_gem_object {
 extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
 extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
 
-/* vgem_dma_buf.c */
-extern struct sg_table *vgem_gem_prime_get_sg_table(
-                       struct drm_gem_object *gobj);
-extern int vgem_gem_prime_pin(struct drm_gem_object *gobj);
-extern void vgem_gem_prime_unpin(struct drm_gem_object *gobj);
-extern void *vgem_gem_prime_vmap(struct drm_gem_object *gobj);
-extern void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-extern struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
-                                                   struct dma_buf *dma_buf);
-
-
 #endif
index f3830db02d4637675cebbe7b6b5e185492571a1e..37f01702d08195b1a9bab1f82fe88434302556b9 100644 (file)
@@ -439,6 +439,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                                 (*t)->dev_attr.attr.name, tg->base + i);
                        if ((*t)->s2) {
                                a2 = &su->u.a2;
+                               sysfs_attr_init(&a2->dev_attr.attr);
                                a2->dev_attr.attr.name = su->name;
                                a2->nr = (*t)->u.s.nr + i;
                                a2->index = (*t)->u.s.index;
@@ -449,6 +450,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                                *attrs = &a2->dev_attr.attr;
                        } else {
                                a = &su->u.a1;
+                               sysfs_attr_init(&a->dev_attr.attr);
                                a->dev_attr.attr.name = su->name;
                                a->index = (*t)->u.index + i;
                                a->dev_attr.attr.mode =
index 4fcb481032992f475e8d196dc3a9dbcfa2407b30..bd1c99deac71b73dadf15615c1e8442027bccee9 100644 (file)
@@ -995,6 +995,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                                 (*t)->dev_attr.attr.name, tg->base + i);
                        if ((*t)->s2) {
                                a2 = &su->u.a2;
+                               sysfs_attr_init(&a2->dev_attr.attr);
                                a2->dev_attr.attr.name = su->name;
                                a2->nr = (*t)->u.s.nr + i;
                                a2->index = (*t)->u.s.index;
@@ -1005,6 +1006,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                                *attrs = &a2->dev_attr.attr;
                        } else {
                                a = &su->u.a1;
+                               sysfs_attr_init(&a->dev_attr.attr);
                                a->dev_attr.attr.name = su->name;
                                a->index = (*t)->u.index + i;
                                a->dev_attr.attr.mode =
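
In both nct6683 and nct6775, sysfs_attr_init() gives the attribute the lockdep class key that statically declared attributes get from the DEVICE_ATTR() macros; without it, registering a dynamically allocated attribute splats under CONFIG_DEBUG_LOCK_ALLOC. A minimal sketch for a dynamic attribute (name and show callback hypothetical):

    struct device_attribute *a = kzalloc(sizeof(*a), GFP_KERNEL);

    if (!a)
            return -ENOMEM;
    sysfs_attr_init(&a->attr);     /* required for dynamic attributes */
    a->attr.name = "my_value";
    a->attr.mode = 0444;
    a->show = my_show;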
index 112e4d45e4a0c31ff8234a36f008b629c71230a5..68800115876bf65867c187498902d0de6dcae687 100644 (file)
@@ -239,8 +239,10 @@ static struct ntc_thermistor_platform_data *
 ntc_thermistor_parse_dt(struct platform_device *pdev)
 {
        struct iio_channel *chan;
+       enum iio_chan_type type;
        struct device_node *np = pdev->dev.of_node;
        struct ntc_thermistor_platform_data *pdata;
+       int ret;
 
        if (!np)
                return NULL;
@@ -253,6 +255,13 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
        if (IS_ERR(chan))
                return ERR_CAST(chan);
 
+       ret = iio_get_channel_type(chan, &type);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
+       if (type != IIO_VOLTAGE)
+               return ERR_PTR(-EINVAL);
+
        if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv))
                return ERR_PTR(-ENODEV);
        if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm))
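
The thermistor math converts an ADC voltage into a resistance, so the bound IIO channel must actually be a voltage channel; anything else is rejected up front. The validation shape, reusable by any IIO consumer:

    enum iio_chan_type type;
    int ret = iio_get_channel_type(chan, &type);

    if (ret < 0)
            return ERR_PTR(ret);
    if (type != IIO_VOLTAGE)       /* the ohm math assumes a voltage input */
            return ERR_PTR(-EINVAL);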
index 99664ebc738d8003139135a89f07f97e3c6305e2..ccf4cffe0ee1dfac282b9afc6e28da5340aa51f3 100644 (file)
@@ -44,7 +44,7 @@
 #include <linux/sysfs.h>
 
 /* Addresses to scan */
-static const unsigned short normal_i2c[] = { 0x37, 0x48, 0x49, 0x4a, 0x4c, 0x4d,
+static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d,
        0x4e, 0x4f, I2C_CLIENT_END };
 
 enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
index 8fe78d08e01cf1551ea0eaf53f50d2185dfff809..7c6966434ee7b9a2707da849e56128af41c0baf5 100644 (file)
@@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_driver);
 MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver");
 MODULE_AUTHOR("Wei Yan <sledge.yanwei@huawei.com>");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:i2c-hix5hd2");
+MODULE_ALIAS("platform:hix5hd2-i2c");
index 958c8db4ec30740e2d9aae00a7835256700d3424..297e9c9ac9432f5e645e06cf932710cd93c7f924 100644 (file)
@@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        i2c->quirks = s3c24xx_get_device_quirks(pdev);
+       i2c->sysreg = ERR_PTR(-ENOENT);
        if (pdata)
                memcpy(i2c->pdata, pdata, sizeof(*pdata));
        else
index 89d8aa1d2818502f974c92f7925ea4440df3d97d..df12c57e6ce07a700d211b81c9b5d3c9c15ff2d3 100644 (file)
@@ -1001,7 +1001,7 @@ static struct platform_driver twl6030_gpadc_driver = {
 
 module_platform_driver(twl6030_gpadc_driver);
 
-MODULE_ALIAS("platform: " DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_AUTHOR("Balaji T K <balajitk@ti.com>");
 MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
 MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com>");
index 0916bf6b6c311c503931f26387712c6677b17645..73b189c1c0fb0fdcc73d64b7118ab4f79a12d41b 100644 (file)
 #define ADIS16400_NO_BURST             BIT(1)
 #define ADIS16400_HAS_SLOW_MODE                BIT(2)
 #define ADIS16400_HAS_SERIAL_NUMBER    BIT(3)
+#define ADIS16400_BURST_DIAG_STAT      BIT(4)
 
 struct adis16400_state;
 
@@ -165,6 +166,7 @@ struct adis16400_state {
        int                             filt_int;
 
        struct adis adis;
+       unsigned long avail_scan_mask[2];
 };
 
 /* At the moment triggers are only used for ring buffer
index 6e727ffe52621f43bb40f31466730705477961ef..90c24a23c679b8001e31cdff48b098cacb872682 100644 (file)
@@ -18,7 +18,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
 {
        struct adis16400_state *st = iio_priv(indio_dev);
        struct adis *adis = &st->adis;
-       uint16_t *tx;
+       unsigned int burst_length;
+       u8 *tx;
 
        if (st->variant->flags & ADIS16400_NO_BURST)
                return adis_update_scan_mode(indio_dev, scan_mask);
@@ -26,26 +27,29 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
        kfree(adis->xfer);
        kfree(adis->buffer);
 
+       /* All but the timestamp channel */
+       burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
+       if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
+               burst_length += sizeof(u16);
+
        adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
        if (!adis->xfer)
                return -ENOMEM;
 
-       adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16),
-               GFP_KERNEL);
+       adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
        if (!adis->buffer)
                return -ENOMEM;
 
-       tx = adis->buffer + indio_dev->scan_bytes;
-
+       tx = adis->buffer + burst_length;
        tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
        tx[1] = 0;
 
        adis->xfer[0].tx_buf = tx;
        adis->xfer[0].bits_per_word = 8;
        adis->xfer[0].len = 2;
-       adis->xfer[1].tx_buf = tx;
+       adis->xfer[1].rx_buf = adis->buffer;
        adis->xfer[1].bits_per_word = 8;
-       adis->xfer[1].len = indio_dev->scan_bytes;
+       adis->xfer[1].len = burst_length;
 
        spi_message_init(&adis->msg);
        spi_message_add_tail(&adis->xfer[0], &adis->msg);
@@ -61,6 +65,7 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
        struct adis16400_state *st = iio_priv(indio_dev);
        struct adis *adis = &st->adis;
        u32 old_speed_hz = st->adis.spi->max_speed_hz;
+       void *buffer;
        int ret;
 
        if (!adis->buffer)
@@ -81,7 +86,12 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
                spi_setup(st->adis.spi);
        }
 
-       iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
+       if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
+               buffer = adis->buffer + sizeof(u16);
+       else
+               buffer = adis->buffer;
+
+       iio_push_to_buffers_with_timestamp(indio_dev, buffer,
                pf->timestamp);
 
        iio_trigger_notify_done(indio_dev->trig);
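
When ADIS16400_BURST_DIAG_STAT is set, the first 16-bit word of the burst is the DIAG_STAT register rather than sample data, so the push into the IIO buffer starts one word in. Skeleton of the offset handling:

    void *data = adis->buffer;

    if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
            data += sizeof(u16);   /* skip the leading DIAG_STAT word */
    iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);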
index fa795dcd5f75ec0a1e8de143bc0122ef36bf9409..2fd68f2219a7d422a604b91ce90138f1050528cd 100644 (file)
@@ -405,6 +405,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
                        *val = st->variant->temp_scale_nano / 1000000;
                        *val2 = (st->variant->temp_scale_nano % 1000000);
                        return IIO_VAL_INT_PLUS_MICRO;
+               case IIO_PRESSURE:
+                       /* 20 uBar = 0.002kPascal */
+                       *val = 0;
+                       *val2 = 2000;
+                       return IIO_VAL_INT_PLUS_MICRO;
                default:
                        return -EINVAL;
                }
@@ -454,10 +459,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
        }
 }
 
-#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \
+#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \
        .type = IIO_VOLTAGE, \
        .indexed = 1, \
-       .channel = 0, \
+       .channel = chn, \
        .extend_name = name, \
        .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
                BIT(IIO_CHAN_INFO_SCALE), \
@@ -474,10 +479,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
 }
 
 #define ADIS16400_SUPPLY_CHAN(addr, bits) \
-       ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY)
+       ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0)
 
 #define ADIS16400_AUX_ADC_CHAN(addr, bits) \
-       ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC)
+       ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1)
 
 #define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
        .type = IIO_ANGL_VEL, \
@@ -773,7 +778,8 @@ static struct adis16400_chip_info adis16400_chips[] = {
                .channels = adis16448_channels,
                .num_channels = ARRAY_SIZE(adis16448_channels),
                .flags = ADIS16400_HAS_PROD_ID |
-                               ADIS16400_HAS_SERIAL_NUMBER,
+                               ADIS16400_HAS_SERIAL_NUMBER |
+                               ADIS16400_BURST_DIAG_STAT,
                .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
                .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
                .temp_scale_nano = 73860000, /* 0.07386 C */
@@ -791,11 +797,6 @@ static const struct iio_info adis16400_info = {
        .debugfs_reg_access = adis_debugfs_reg_access,
 };
 
-static const unsigned long adis16400_burst_scan_mask[] = {
-       ~0UL,
-       0,
-};
-
 static const char * const adis16400_status_error_msgs[] = {
        [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
        [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
@@ -843,6 +844,20 @@ static const struct adis_data adis16400_data = {
                BIT(ADIS16400_DIAG_STAT_POWER_LOW),
 };
 
+static void adis16400_setup_chan_mask(struct adis16400_state *st)
+{
+       const struct adis16400_chip_info *chip_info = st->variant;
+       unsigned i;
+
+       for (i = 0; i < chip_info->num_channels; i++) {
+               const struct iio_chan_spec *ch = &chip_info->channels[i];
+
+               if (ch->scan_index >= 0 &&
+                   ch->scan_index != ADIS16400_SCAN_TIMESTAMP)
+                       st->avail_scan_mask[0] |= BIT(ch->scan_index);
+       }
+}
+
 static int adis16400_probe(struct spi_device *spi)
 {
        struct adis16400_state *st;
@@ -866,8 +881,10 @@ static int adis16400_probe(struct spi_device *spi)
        indio_dev->info = &adis16400_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
 
-       if (!(st->variant->flags & ADIS16400_NO_BURST))
-               indio_dev->available_scan_masks = adis16400_burst_scan_mask;
+       if (!(st->variant->flags & ADIS16400_NO_BURST)) {
+               adis16400_setup_chan_mask(st);
+               indio_dev->available_scan_masks = st->avail_scan_mask;
+       }
 
        ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
        if (ret)
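
Burst mode always transfers every channel, so only one scan mask is legal; the old ~0UL mask also claimed scan bits the device does not have. The new per-chip mask is built from the channel table instead, roughly:

    unsigned long mask = 0;
    int i;

    for (i = 0; i < chip_info->num_channels; i++) {
            int si = chip_info->channels[i].scan_index;

            if (si >= 0 && si != ADIS16400_SCAN_TIMESTAMP)
                    mask |= BIT(si);   /* every real channel, no extras */
    }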
index 327529ee85eb1ed20bb8b7afad8022df94118108..3f40319a55da364f2e757acb7bc0e83d86c78c38 100644 (file)
@@ -547,11 +547,11 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc,
        return 0;
 
 err_prot_mr:
-       ib_dereg_mr(desc->pi_ctx->prot_mr);
+       ib_dereg_mr(pi_ctx->prot_mr);
 err_prot_frpl:
-       ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+       ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
 err_pi_ctx:
-       kfree(desc->pi_ctx);
+       kfree(pi_ctx);
 
        return ret;
 }
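
The cleanup labels now unwind through the local pi_ctx pointer; freeing via desc->pi_ctx was wrong because that field does not reliably point at the partially built context on these error paths. A sketch of the publish-on-success shape this enforces:

    pi_ctx = kzalloc(sizeof(*pi_ctx), GFP_KERNEL);
    if (!pi_ctx)
            return -ENOMEM;
    /* ... allocate pi_ctx->prot_frpl, pi_ctx->prot_mr, ... */

    desc->pi_ctx = pi_ctx;          /* publish only once fully constructed */
    return 0;

    err_prot_mr:
    ib_dereg_mr(pi_ctx->prot_mr);   /* unwind via the local pointer */
    err_prot_frpl:
    ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
    err_pi_ctx:
    kfree(pi_ctx);
    return ret;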
index 7752bd59d4b7d529218dad186c155a91a9922ad1..a353b7de6d22e91a52378cd4c106b17cafc26a07 100644 (file)
@@ -1063,9 +1063,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
        right = (packet[1] & 0x02) >> 1;
        middle = (packet[1] & 0x04) >> 2;
 
-       /* Divide 2 since trackpoint's speed is too fast */
-       input_report_rel(dev2, REL_X, (char)x / 2);
-       input_report_rel(dev2, REL_Y, -((char)y / 2));
+       input_report_rel(dev2, REL_X, (char)x);
+       input_report_rel(dev2, REL_Y, -((char)y));
 
        input_report_key(dev2, BTN_LEFT, left);
        input_report_key(dev2, BTN_RIGHT, right);
index 79363b6871959ec2b74c24f4b9e9b89b172d950d..ce3d40004458c87392339472f654462fae7cf0bc 100644 (file)
@@ -1376,10 +1376,11 @@ static bool elantech_is_signature_valid(const unsigned char *param)
                return true;
 
        /*
-        * Some models have a revision higher then 20. Meaning param[2] may
-        * be 10 or 20, skip the rates check for these.
+        * Some hw_version >= 4 models have a revision higher than 20, meaning
+        * that param[2] may be 10 or 20; skip the rates check for these.
         */
-       if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
+       if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f &&
+           param[2] < 40)
                return true;
 
        for (i = 0; i < ARRAY_SIZE(rates); i++)
@@ -1555,6 +1556,7 @@ static int elantech_set_properties(struct elantech_data *etd)
                case 9:
                case 10:
                case 13:
+               case 14:
                        etd->hw_version = 4;
                        break;
                default:
index 630af73e98c488a5e266e4ccb6eed5dba622f3d3..35c8d0ceabeebf989b8eeff5cd54ee8f3ac2e247 100644 (file)
@@ -150,6 +150,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                {ANY_BOARD_ID, 2961},
                1024, 5112, 2024, 4832
        },
+       {
+               (const char * const []){"LEN2000", NULL},
+               {ANY_BOARD_ID, ANY_BOARD_ID},
+               1024, 5113, 2021, 4832
+       },
        {
                (const char * const []){"LEN2001", NULL},
                {ANY_BOARD_ID, ANY_BOARD_ID},
@@ -191,7 +196,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0045",
        "LEN0047",
        "LEN0049",
-       "LEN2000",
+       "LEN2000", /* S540 */
        "LEN2001", /* Edge E431 */
        "LEN2002", /* Edge E531 */
        "LEN2003",
index e43d48956dea239fe6816bdb23f0174754c623ee..c5677ed2cd891d530ab02806f4c21782e4db226f 100644 (file)
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
-/* A list of preallocated protection domains */
-static LIST_HEAD(iommu_pd_list);
-static DEFINE_SPINLOCK(iommu_pd_list_lock);
-
 /* List of all available dev_data structures */
 static LIST_HEAD(dev_data_list);
 static DEFINE_SPINLOCK(dev_data_list_lock);
@@ -119,7 +115,7 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void update_domain(struct protection_domain *domain);
-static int __init alloc_passthrough_domain(void);
+static int alloc_passthrough_domain(void);
 
 /****************************************************************************
  *
@@ -234,31 +230,38 @@ static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
 }
 
 /*
- * In this function the list of preallocated protection domains is traversed to
- * find the domain for a specific device
+ * This function actually applies the mapping to the page table of the
+ * dma_ops domain.
  */
-static struct dma_ops_domain *find_protection_domain(u16 devid)
+static void alloc_unity_mapping(struct dma_ops_domain *dma_dom,
+                               struct unity_map_entry *e)
 {
-       struct dma_ops_domain *entry, *ret = NULL;
-       unsigned long flags;
-       u16 alias = amd_iommu_alias_table[devid];
-
-       if (list_empty(&iommu_pd_list))
-               return NULL;
-
-       spin_lock_irqsave(&iommu_pd_list_lock, flags);
+       u64 addr;
 
-       list_for_each_entry(entry, &iommu_pd_list, list) {
-               if (entry->target_dev == devid ||
-                   entry->target_dev == alias) {
-                       ret = entry;
-                       break;
-               }
+       for (addr = e->address_start; addr < e->address_end;
+            addr += PAGE_SIZE) {
+               if (addr < dma_dom->aperture_size)
+                       __set_bit(addr >> PAGE_SHIFT,
+                                 dma_dom->aperture[0]->bitmap);
        }
+}
+
+/*
+ * Inits the unity mappings required for a specific device
+ */
+static void init_unity_mappings_for_device(struct device *dev,
+                                          struct dma_ops_domain *dma_dom)
+{
+       struct unity_map_entry *e;
+       u16 devid;
 
-       spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+       devid = get_device_id(dev);
 
-       return ret;
+       list_for_each_entry(e, &amd_iommu_unity_map, list) {
+               if (!(devid >= e->devid_start && devid <= e->devid_end))
+                       continue;
+               alloc_unity_mapping(dma_dom, e);
+       }
 }
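
Unity mappings are fixed identity windows demanded by the firmware (IVRS), so the pages they cover must be pre-marked as used in the dma_ops aperture bitmap or the address allocator could hand them out for streaming DMA. The reservation reduces to setting bits, as in the hunk above:

    u64 addr;

    for (addr = e->address_start; addr < e->address_end; addr += PAGE_SIZE)
            if (addr < dma_dom->aperture_size)     /* inside aperture 0 */
                    __set_bit(addr >> PAGE_SHIFT,
                              dma_dom->aperture[0]->bitmap);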
 
 /*
@@ -290,11 +293,23 @@ static bool check_device(struct device *dev)
 
 static void init_iommu_group(struct device *dev)
 {
+       struct dma_ops_domain *dma_domain;
+       struct iommu_domain *domain;
        struct iommu_group *group;
 
        group = iommu_group_get_for_dev(dev);
-       if (!IS_ERR(group))
-               iommu_group_put(group);
+       if (IS_ERR(group))
+               return;
+
+       domain = iommu_group_default_domain(group);
+       if (!domain)
+               goto out;
+
+       dma_domain = to_pdomain(domain)->priv;
+
+       init_unity_mappings_for_device(dev, dma_domain);
+out:
+       iommu_group_put(group);
 }
 
 static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
@@ -434,64 +449,15 @@ static void iommu_uninit_device(struct device *dev)
        /* Unlink from alias, it may change if another device is re-plugged */
        dev_data->alias_data = NULL;
 
+       /* Remove dma-ops */
+       dev->archdata.dma_ops = NULL;
+
        /*
         * We keep dev_data around for unplugged devices and reuse it when the
         * device is re-plugged - not doing so would introduce a ton of races.
         */
 }
 
-void __init amd_iommu_uninit_devices(void)
-{
-       struct iommu_dev_data *dev_data, *n;
-       struct pci_dev *pdev = NULL;
-
-       for_each_pci_dev(pdev) {
-
-               if (!check_device(&pdev->dev))
-                       continue;
-
-               iommu_uninit_device(&pdev->dev);
-       }
-
-       /* Free all of our dev_data structures */
-       list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
-               free_dev_data(dev_data);
-}
-
-int __init amd_iommu_init_devices(void)
-{
-       struct pci_dev *pdev = NULL;
-       int ret = 0;
-
-       for_each_pci_dev(pdev) {
-
-               if (!check_device(&pdev->dev))
-                       continue;
-
-               ret = iommu_init_device(&pdev->dev);
-               if (ret == -ENOTSUPP)
-                       iommu_ignore_device(&pdev->dev);
-               else if (ret)
-                       goto out_free;
-       }
-
-       /*
-        * Initialize IOMMU groups only after iommu_init_device() has
-        * had a chance to populate any IVRS defined aliases.
-        */
-       for_each_pci_dev(pdev) {
-               if (check_device(&pdev->dev))
-                       init_iommu_group(&pdev->dev);
-       }
-
-       return 0;
-
-out_free:
-
-       amd_iommu_uninit_devices();
-
-       return ret;
-}
 #ifdef CONFIG_AMD_IOMMU_STATS
 
 /*
@@ -1463,94 +1429,6 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
        return unmapped;
 }
 
-/*
- * This function checks if a specific unity mapping entry is needed for
- * this specific IOMMU.
- */
-static int iommu_for_unity_map(struct amd_iommu *iommu,
-                              struct unity_map_entry *entry)
-{
-       u16 bdf, i;
-
-       for (i = entry->devid_start; i <= entry->devid_end; ++i) {
-               bdf = amd_iommu_alias_table[i];
-               if (amd_iommu_rlookup_table[bdf] == iommu)
-                       return 1;
-       }
-
-       return 0;
-}
-
-/*
- * This function actually applies the mapping to the page table of the
- * dma_ops domain.
- */
-static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
-                            struct unity_map_entry *e)
-{
-       u64 addr;
-       int ret;
-
-       for (addr = e->address_start; addr < e->address_end;
-            addr += PAGE_SIZE) {
-               ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
-                                    PAGE_SIZE);
-               if (ret)
-                       return ret;
-               /*
-                * if unity mapping is in aperture range mark the page
-                * as allocated in the aperture
-                */
-               if (addr < dma_dom->aperture_size)
-                       __set_bit(addr >> PAGE_SHIFT,
-                                 dma_dom->aperture[0]->bitmap);
-       }
-
-       return 0;
-}
-
-/*
- * Init the unity mappings for a specific IOMMU in the system
- *
- * Basically iterates over all unity mapping entries and applies them to
- * the default domain DMA of that IOMMU if necessary.
- */
-static int iommu_init_unity_mappings(struct amd_iommu *iommu)
-{
-       struct unity_map_entry *entry;
-       int ret;
-
-       list_for_each_entry(entry, &amd_iommu_unity_map, list) {
-               if (!iommu_for_unity_map(iommu, entry))
-                       continue;
-               ret = dma_ops_unity_map(iommu->default_dom, entry);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-/*
- * Inits the unity mappings required for a specific device
- */
-static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
-                                         u16 devid)
-{
-       struct unity_map_entry *e;
-       int ret;
-
-       list_for_each_entry(e, &amd_iommu_unity_map, list) {
-               if (!(devid >= e->devid_start && devid <= e->devid_end))
-                       continue;
-               ret = dma_ops_unity_map(dma_dom, e);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
 /****************************************************************************
  *
  * The next functions belong to the address allocator for the dma_ops
@@ -1704,14 +1582,16 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
        unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
        int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
        int i = start >> APERTURE_RANGE_SHIFT;
-       unsigned long boundary_size;
+       unsigned long boundary_size, mask;
        unsigned long address = -1;
        unsigned long limit;
 
        next_bit >>= PAGE_SHIFT;
 
-       boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-                       PAGE_SIZE) >> PAGE_SHIFT;
+       mask = dma_get_seg_boundary(dev);
+
+       boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
+                                  1UL << (BITS_PER_LONG - PAGE_SHIFT);
 
        for (;i < max_index; ++i) {
                unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
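The reworked boundary_size computation guards against overflow: when dma_get_seg_boundary() returns ~0UL (no segment boundary), mask + 1 wraps to 0, so the code falls back to the largest representable boundary of 1UL << (BITS_PER_LONG - PAGE_SHIFT) pages. A standalone sketch of the same guard (ALIGN, PAGE_SIZE, PAGE_SHIFT and BITS_PER_LONG are the usual kernel macros; the helper name is illustrative):

	/* Illustrative: overflow-safe segment boundary in units of pages. */
	static unsigned long seg_boundary_pages(unsigned long mask)
	{
		if (mask == ~0UL)	/* mask + 1 would wrap to 0 */
			return 1UL << (BITS_PER_LONG - PAGE_SHIFT);

		return ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT;
	}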
@@ -1869,9 +1749,15 @@ static void free_pt_##LVL (unsigned long __pt)                   \
        pt = (u64 *)__pt;                                       \
                                                                \
        for (i = 0; i < 512; ++i) {                             \
+               /* PTE present? */                              \
                if (!IOMMU_PTE_PRESENT(pt[i]))                  \
                        continue;                               \
                                                                \
+               /* Large PTE? */                                \
+               if (PM_PTE_LEVEL(pt[i]) == 0 ||                 \
+                   PM_PTE_LEVEL(pt[i]) == 7)                   \
+                       continue;                               \
+                                                               \
                p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);       \
                FN(p);                                          \
        }                                                       \
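The new check skips PTEs that do not reference a lower-level page table: in the AMD IOMMU page-table format the 3-bit next-level field holds 1-6 for intermediate levels, while 0 denotes a 4 KiB leaf and 7 a large-page leaf, neither of which owns a table page that free_pt_##LVL could recurse into. A sketch of the test (shift and mask mirror the driver's PM_PTE_LEVEL() macro; the helper name is illustrative):

	/* Illustrative: does this PTE point to a freeable next-level table? */
	static inline bool pte_points_to_table(u64 pte)
	{
		unsigned int level = (pte >> 9) & 0x7;	/* PM_PTE_LEVEL(pte) */

		return level != 0 && level != 7;
	}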
@@ -2008,7 +1894,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
                goto free_dma_dom;
 
        dma_dom->need_flush = false;
-       dma_dom->target_dev = 0xffff;
 
        add_domain_to_list(&dma_dom->domain);
 
@@ -2373,110 +2258,67 @@ static void detach_device(struct device *dev)
        dev_data->ats.enabled = false;
 }
 
-/*
- * Find out the protection domain structure for a given PCI device. This
- * will give us the pointer to the page table root for example.
- */
-static struct protection_domain *domain_for_device(struct device *dev)
-{
-       struct iommu_dev_data *dev_data;
-       struct protection_domain *dom = NULL;
-       unsigned long flags;
-
-       dev_data   = get_dev_data(dev);
-
-       if (dev_data->domain)
-               return dev_data->domain;
-
-       if (dev_data->alias_data != NULL) {
-               struct iommu_dev_data *alias_data = dev_data->alias_data;
-
-               read_lock_irqsave(&amd_iommu_devtable_lock, flags);
-               if (alias_data->domain != NULL) {
-                       __attach_device(dev_data, alias_data->domain);
-                       dom = alias_data->domain;
-               }
-               read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
-       }
-
-       return dom;
-}
-
-static int device_change_notifier(struct notifier_block *nb,
-                                 unsigned long action, void *data)
+static int amd_iommu_add_device(struct device *dev)
 {
-       struct dma_ops_domain *dma_domain;
-       struct protection_domain *domain;
        struct iommu_dev_data *dev_data;
-       struct device *dev = data;
+       struct iommu_domain *domain;
        struct amd_iommu *iommu;
-       unsigned long flags;
        u16 devid;
+       int ret;
 
-       if (!check_device(dev))
+       if (!check_device(dev) || get_dev_data(dev))
                return 0;
 
-       devid    = get_device_id(dev);
-       iommu    = amd_iommu_rlookup_table[devid];
-       dev_data = get_dev_data(dev);
-
-       switch (action) {
-       case BUS_NOTIFY_ADD_DEVICE:
+       devid = get_device_id(dev);
+       iommu = amd_iommu_rlookup_table[devid];
 
-               iommu_init_device(dev);
-               init_iommu_group(dev);
+       ret = iommu_init_device(dev);
+       if (ret) {
+               if (ret != -ENOTSUPP)
+                       pr_err("Failed to initialize device %s - trying to proceed anyway\n",
+                               dev_name(dev));
 
-               /*
-                * dev_data is still NULL and
-                * got initialized in iommu_init_device
-                */
-               dev_data = get_dev_data(dev);
+               iommu_ignore_device(dev);
+               dev->archdata.dma_ops = &nommu_dma_ops;
+               goto out;
+       }
+       init_iommu_group(dev);
 
-               if (iommu_pass_through || dev_data->iommu_v2) {
-                       dev_data->passthrough = true;
-                       attach_device(dev, pt_domain);
-                       break;
-               }
+       dev_data = get_dev_data(dev);
 
-               domain = domain_for_device(dev);
+       BUG_ON(!dev_data);
 
-               /* allocate a protection domain if a device is added */
-               dma_domain = find_protection_domain(devid);
-               if (!dma_domain) {
-                       dma_domain = dma_ops_domain_alloc();
-                       if (!dma_domain)
-                               goto out;
-                       dma_domain->target_dev = devid;
-
-                       spin_lock_irqsave(&iommu_pd_list_lock, flags);
-                       list_add_tail(&dma_domain->list, &iommu_pd_list);
-                       spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
-               }
+       if (dev_data->iommu_v2)
+               iommu_request_dm_for_dev(dev);
 
+       /* Domains are initialized for this device - check which type we ended up with */
+       domain = iommu_get_domain_for_dev(dev);
+       if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+               dev_data->passthrough = true;
+               dev->archdata.dma_ops = &nommu_dma_ops;
+       } else {
                dev->archdata.dma_ops = &amd_iommu_dma_ops;
-
-               break;
-       case BUS_NOTIFY_REMOVED_DEVICE:
-
-               iommu_uninit_device(dev);
-
-       default:
-               goto out;
        }
 
+out:
        iommu_completion_wait(iommu);
 
-out:
        return 0;
 }
 
-static struct notifier_block device_nb = {
-       .notifier_call = device_change_notifier,
-};
-
-void amd_iommu_init_notifier(void)
+static void amd_iommu_remove_device(struct device *dev)
 {
-       bus_register_notifier(&pci_bus_type, &device_nb);
+       struct amd_iommu *iommu;
+       u16 devid;
+
+       if (!check_device(dev))
+               return;
+
+       devid = get_device_id(dev);
+       iommu = amd_iommu_rlookup_table[devid];
+
+       iommu_uninit_device(dev);
+       iommu_completion_wait(iommu);
 }
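With the driver-private notifier gone, add_device/remove_device are called by the iommu core's generic bus notifier instead. A simplified sketch of that dispatch (modeled on iommu_bus_notifier() in drivers/iommu/iommu.c; error paths omitted):

	static int iommu_bus_notify(struct notifier_block *nb,
				    unsigned long action, void *data)
	{
		struct device *dev = data;
		const struct iommu_ops *ops = dev->bus->iommu_ops;

		if (action == BUS_NOTIFY_ADD_DEVICE && ops->add_device)
			return ops->add_device(dev);
		if (action == BUS_NOTIFY_REMOVED_DEVICE && ops->remove_device)
			ops->remove_device(dev);

		return 0;
	}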
 
 /*****************************************************************************
@@ -2495,28 +2337,20 @@ void amd_iommu_init_notifier(void)
 static struct protection_domain *get_domain(struct device *dev)
 {
        struct protection_domain *domain;
-       struct dma_ops_domain *dma_dom;
-       u16 devid = get_device_id(dev);
+       struct iommu_domain *io_domain;
 
        if (!check_device(dev))
                return ERR_PTR(-EINVAL);
 
-       domain = domain_for_device(dev);
-       if (domain != NULL && !dma_ops_domain(domain))
-               return ERR_PTR(-EBUSY);
-
-       if (domain != NULL)
-               return domain;
+       io_domain = iommu_get_domain_for_dev(dev);
+       if (!io_domain)
+               return NULL;
 
-       /* Device not bound yet - bind it */
-       dma_dom = find_protection_domain(devid);
-       if (!dma_dom)
-               dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
-       attach_device(dev, &dma_dom->domain);
-       DUMP_printk("Using protection domain %d for device %s\n",
-                   dma_dom->domain.id, dev_name(dev));
+       domain = to_pdomain(io_domain);
+       if (!dma_ops_domain(domain))
+               return ERR_PTR(-EBUSY);
 
-       return &dma_dom->domain;
+       return domain;
 }
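to_pdomain() used here is the usual container_of() accessor from the generic iommu_domain to the driver's protection_domain; for reference, a sketch of what it resolves to:

	static struct protection_domain *to_pdomain(struct iommu_domain *dom)
	{
		return container_of(dom, struct protection_domain, domain);
	}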
 
 static void update_device_table(struct protection_domain *domain)
@@ -2930,6 +2764,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
        size      = PAGE_ALIGN(size);
        dma_mask  = dev->coherent_dma_mask;
        flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+       flag     |= __GFP_ZERO;
 
        page = alloc_pages(flag | __GFP_NOWARN,  get_order(size));
        if (!page) {
@@ -3011,54 +2846,6 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
        return check_device(dev);
 }
 
-/*
- * The function for pre-allocating protection domains.
- *
- * If the driver core informs the DMA layer if a driver grabs a device
- * we don't need to preallocate the protection domains anymore.
- * For now we have to.
- */
-static void __init prealloc_protection_domains(void)
-{
-       struct iommu_dev_data *dev_data;
-       struct dma_ops_domain *dma_dom;
-       struct pci_dev *dev = NULL;
-       u16 devid;
-
-       for_each_pci_dev(dev) {
-
-               /* Do we handle this device? */
-               if (!check_device(&dev->dev))
-                       continue;
-
-               dev_data = get_dev_data(&dev->dev);
-               if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
-                       /* Make sure passthrough domain is allocated */
-                       alloc_passthrough_domain();
-                       dev_data->passthrough = true;
-                       attach_device(&dev->dev, pt_domain);
-                       pr_info("AMD-Vi: Using passthrough domain for device %s\n",
-                               dev_name(&dev->dev));
-               }
-
-               /* Is there already any domain for it? */
-               if (domain_for_device(&dev->dev))
-                       continue;
-
-               devid = get_device_id(&dev->dev);
-
-               dma_dom = dma_ops_domain_alloc();
-               if (!dma_dom)
-                       continue;
-               init_unity_mappings_for_device(dma_dom, devid);
-               dma_dom->target_dev = devid;
-
-               attach_device(&dev->dev, &dma_dom->domain);
-
-               list_add_tail(&dma_dom->list, &iommu_pd_list);
-       }
-}
-
 static struct dma_map_ops amd_iommu_dma_ops = {
        .alloc = alloc_coherent,
        .free = free_coherent,
@@ -3069,76 +2856,16 @@ static struct dma_map_ops amd_iommu_dma_ops = {
        .dma_supported = amd_iommu_dma_supported,
 };
 
-static unsigned device_dma_ops_init(void)
-{
-       struct iommu_dev_data *dev_data;
-       struct pci_dev *pdev = NULL;
-       unsigned unhandled = 0;
-
-       for_each_pci_dev(pdev) {
-               if (!check_device(&pdev->dev)) {
-
-                       iommu_ignore_device(&pdev->dev);
-
-                       unhandled += 1;
-                       continue;
-               }
-
-               dev_data = get_dev_data(&pdev->dev);
-
-               if (!dev_data->passthrough)
-                       pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
-               else
-                       pdev->dev.archdata.dma_ops = &nommu_dma_ops;
-       }
-
-       return unhandled;
-}
-
-/*
- * The function which clues the AMD IOMMU driver into dma_ops.
- */
-
-void __init amd_iommu_init_api(void)
+int __init amd_iommu_init_api(void)
 {
-       bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
+       return bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
 }
 
 int __init amd_iommu_init_dma_ops(void)
 {
-       struct amd_iommu *iommu;
-       int ret, unhandled;
-
-       /*
-        * first allocate a default protection domain for every IOMMU we
-        * found in the system. Devices not assigned to any other
-        * protection domain will be assigned to the default one.
-        */
-       for_each_iommu(iommu) {
-               iommu->default_dom = dma_ops_domain_alloc();
-               if (iommu->default_dom == NULL)
-                       return -ENOMEM;
-               iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
-               ret = iommu_init_unity_mappings(iommu);
-               if (ret)
-                       goto free_domains;
-       }
-
-       /*
-        * Pre-allocate the protection domains for each device.
-        */
-       prealloc_protection_domains();
-
        iommu_detected = 1;
        swiotlb = 0;
 
-       /* Make the driver finally visible to the drivers */
-       unhandled = device_dma_ops_init();
-       if (unhandled && max_pfn > MAX_DMA32_PFN) {
-               /* There are unhandled devices - initialize swiotlb for them */
-               swiotlb = 1;
-       }
-
        amd_iommu_stats_init();
 
        if (amd_iommu_unmap_flush)
@@ -3147,14 +2874,6 @@ int __init amd_iommu_init_dma_ops(void)
                pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
        return 0;
-
-free_domains:
-
-       for_each_iommu(iommu) {
-               dma_ops_domain_free(iommu->default_dom);
-       }
-
-       return ret;
 }
 
 /*****************************************************************************
@@ -3221,7 +2940,7 @@ out_err:
        return NULL;
 }
 
-static int __init alloc_passthrough_domain(void)
+static int alloc_passthrough_domain(void)
 {
        if (pt_domain != NULL)
                return 0;
@@ -3239,30 +2958,46 @@ static int __init alloc_passthrough_domain(void)
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
        struct protection_domain *pdomain;
+       struct dma_ops_domain *dma_domain;
 
-       /* We only support unmanaged domains for now */
-       if (type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
-
-       pdomain = protection_domain_alloc();
-       if (!pdomain)
-               goto out_free;
+       switch (type) {
+       case IOMMU_DOMAIN_UNMANAGED:
+               pdomain = protection_domain_alloc();
+               if (!pdomain)
+                       return NULL;
 
-       pdomain->mode    = PAGE_MODE_3_LEVEL;
-       pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-       if (!pdomain->pt_root)
-               goto out_free;
+               pdomain->mode    = PAGE_MODE_3_LEVEL;
+               pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+               if (!pdomain->pt_root) {
+                       protection_domain_free(pdomain);
+                       return NULL;
+               }
 
-       pdomain->domain.geometry.aperture_start = 0;
-       pdomain->domain.geometry.aperture_end   = ~0ULL;
-       pdomain->domain.geometry.force_aperture = true;
+               pdomain->domain.geometry.aperture_start = 0;
+               pdomain->domain.geometry.aperture_end   = ~0ULL;
+               pdomain->domain.geometry.force_aperture = true;
 
-       return &pdomain->domain;
+               break;
+       case IOMMU_DOMAIN_DMA:
+               dma_domain = dma_ops_domain_alloc();
+               if (!dma_domain) {
+                       pr_err("AMD-Vi: Failed to allocate\n");
+                       return NULL;
+               }
+               pdomain = &dma_domain->domain;
+               break;
+       case IOMMU_DOMAIN_IDENTITY:
+               pdomain = protection_domain_alloc();
+               if (!pdomain)
+                       return NULL;
 
-out_free:
-       protection_domain_free(pdomain);
+               pdomain->mode = PAGE_MODE_NONE;
+               break;
+       default:
+               return NULL;
+       }
 
-       return NULL;
+       return &pdomain->domain;
 }
 
 static void amd_iommu_domain_free(struct iommu_domain *dom)
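The three cases above match the domain types the iommu core may request: unmanaged domains for callers like VFIO, a DMA-ops domain as the per-group default, and an identity (passthrough) domain with no page table (PAGE_MODE_NONE). A hypothetical caller sketch (names illustrative; the core reaches this callback through bus->iommu_ops):

	/* Illustrative: core-side allocation reduces to the switch above. */
	static struct iommu_domain *alloc_domain_of_type(struct bus_type *bus,
							 unsigned type)
	{
		if (!bus->iommu_ops || !bus->iommu_ops->domain_alloc)
			return NULL;

		return bus->iommu_ops->domain_alloc(type);
	}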
@@ -3412,6 +3147,47 @@ static bool amd_iommu_capable(enum iommu_cap cap)
        return false;
 }
 
+static void amd_iommu_get_dm_regions(struct device *dev,
+                                    struct list_head *head)
+{
+       struct unity_map_entry *entry;
+       u16 devid;
+
+       devid = get_device_id(dev);
+
+       list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+               struct iommu_dm_region *region;
+
+               if (devid < entry->devid_start || devid > entry->devid_end)
+                       continue;
+
+               region = kzalloc(sizeof(*region), GFP_KERNEL);
+               if (!region) {
+                       pr_err("Out of memory allocating dm-regions for %s\n",
+                               dev_name(dev));
+                       return;
+               }
+
+               region->start = entry->address_start;
+               region->length = entry->address_end - entry->address_start;
+               if (entry->prot & IOMMU_PROT_IR)
+                       region->prot |= IOMMU_READ;
+               if (entry->prot & IOMMU_PROT_IW)
+                       region->prot |= IOMMU_WRITE;
+
+               list_add_tail(&region->list, head);
+       }
+}
+
+static void amd_iommu_put_dm_regions(struct device *dev,
+                                    struct list_head *head)
+{
+       struct iommu_dm_region *entry, *next;
+
+       list_for_each_entry_safe(entry, next, head, list)
+               kfree(entry);
+}
+
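These two callbacks let the iommu core discover the IVRS unity ranges and map them one-to-one into a device's default DMA domain before the device issues transactions. A simplified sketch of the expected consumer (modeled on the core's direct-mapping setup; alignment and error handling omitted):

	static void map_direct_regions(struct iommu_domain *domain,
				       struct device *dev,
				       const struct iommu_ops *ops)
	{
		struct iommu_dm_region *region;
		LIST_HEAD(regions);

		ops->get_dm_regions(dev, &regions);

		list_for_each_entry(region, &regions, list)
			iommu_map(domain, region->start, region->start,
				  region->length, region->prot);

		ops->put_dm_regions(dev, &regions);
	}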
 static const struct iommu_ops amd_iommu_ops = {
        .capable = amd_iommu_capable,
        .domain_alloc = amd_iommu_domain_alloc,
@@ -3422,6 +3198,10 @@ static const struct iommu_ops amd_iommu_ops = {
        .unmap = amd_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = amd_iommu_iova_to_phys,
+       .add_device = amd_iommu_add_device,
+       .remove_device = amd_iommu_remove_device,
+       .get_dm_regions = amd_iommu_get_dm_regions,
+       .put_dm_regions = amd_iommu_put_dm_regions,
        .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
 };
 
index 450ef5001a65ab3bea19e1a9648324eea9951ede..dbac49cea7a143d3e510025be467e70540d518bb 100644 (file)
@@ -226,6 +226,7 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
 
 static int amd_iommu_enable_interrupts(void);
 static int __init iommu_go_to_state(enum iommu_init_state state);
+static void init_device_table_dma(void);
 
 static inline void update_last_devid(u16 devid)
 {
@@ -1385,9 +1386,15 @@ static int __init amd_iommu_init_pci(void)
                        break;
        }
 
-       ret = amd_iommu_init_devices();
+       init_device_table_dma();
+
+       for_each_iommu(iommu)
+               iommu_flush_all_caches(iommu);
+
+       ret = amd_iommu_init_api();
 
-       print_iommu_info();
+       if (!ret)
+               print_iommu_info();
 
        return ret;
 }
@@ -1825,8 +1832,6 @@ static bool __init check_ioapic_information(void)
 
 static void __init free_dma_resources(void)
 {
-       amd_iommu_uninit_devices();
-
        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
                   get_order(MAX_DOMAIN_ID/8));
 
@@ -2019,27 +2024,10 @@ static bool detect_ivrs(void)
 
 static int amd_iommu_init_dma(void)
 {
-       struct amd_iommu *iommu;
-       int ret;
-
        if (iommu_pass_through)
-               ret = amd_iommu_init_passthrough();
+               return amd_iommu_init_passthrough();
        else
-               ret = amd_iommu_init_dma_ops();
-
-       if (ret)
-               return ret;
-
-       init_device_table_dma();
-
-       for_each_iommu(iommu)
-               iommu_flush_all_caches(iommu);
-
-       amd_iommu_init_api();
-
-       amd_iommu_init_notifier();
-
-       return 0;
+               return amd_iommu_init_dma_ops();
 }
 
 /****************************************************************************
index 72b0fd455e2444cc12a9ee2b678c6b0456c80503..9ed1c43305519842e23d4e8fa88f24bb3911ff34 100644 (file)
@@ -30,7 +30,7 @@ extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
 extern int amd_iommu_init_devices(void);
 extern void amd_iommu_uninit_devices(void);
 extern void amd_iommu_init_notifier(void);
-extern void amd_iommu_init_api(void);
+extern int amd_iommu_init_api(void);
 
 /* Needed for interrupt remapping */
 extern int amd_iommu_prepare(void);
index 05030e523771a6ee3befbe890ed45b47a86f8f7f..bb56560ac6ca9250a2f43e8d32768cdff061938b 100644 (file)
@@ -446,8 +446,6 @@ struct aperture_range {
  * Data container for a dma_ops specific protection domain
  */
 struct dma_ops_domain {
-       struct list_head list;
-
        /* generic protection domain information */
        struct protection_domain domain;
 
@@ -462,12 +460,6 @@ struct dma_ops_domain {
 
        /* This will be set to true when TLB needs to be flushed */
        bool need_flush;
-
-       /*
-        * if this is a preallocated domain, keep the device for which it was
-        * preallocated in this variable
-        */
-       u16 target_dev;
 };
 
 /*
@@ -552,9 +544,6 @@ struct amd_iommu {
        /* if one, we need to send a completion wait command */
        bool need_sync;
 
-       /* default dma_ops domain for that IOMMU */
-       struct dma_ops_domain *default_dom;
-
        /* IOMMU sysfs device */
        struct device *iommu_dev;
 
index 9847613085e157976707e0d1aa0cc87c3e8b3c68..c5886582b64fbb629d0cdacfcb96ba41443d3985 100644 (file)
@@ -26,7 +26,7 @@
  * These routines are used by both DMA-remapping and Interrupt-remapping
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
+#define pr_fmt(fmt)     "DMAR: " fmt
 
 #include <linux/pci.h>
 #include <linux/dmar.h>
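pr_fmt() must be defined before printk.h is pulled in (transitively via the headers that follow it); every pr_*() call in the file is then prefixed automatically, which is why the individual messages in this file drop their hand-written "IOMMU:" prefixes. Simplified from printk.h:

	#define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
	/* so pr_err("Can't reserve memory\n") prints "DMAR: Can't reserve memory\n" */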
@@ -555,7 +555,7 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
                        break;
                } else if (next > end) {
                        /* Avoid passing table end */
-                       pr_warn(FW_BUG "record passes table end\n");
+                       pr_warn(FW_BUG "Record passes table end\n");
                        ret = -EINVAL;
                        break;
                }
@@ -802,7 +802,7 @@ int __init dmar_table_init(void)
                ret = parse_dmar_table();
                if (ret < 0) {
                        if (ret != -ENODEV)
-                               pr_info("parse DMAR table failure.\n");
+                               pr_info("Parse DMAR table failure.\n");
                } else  if (list_empty(&dmar_drhd_units)) {
                        pr_info("No DMAR devices found\n");
                        ret = -ENODEV;
@@ -847,7 +847,7 @@ dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
        else
                addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
        if (!addr) {
-               pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
+               pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
                return -EINVAL;
        }
 
@@ -921,14 +921,14 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
        iommu->reg_size = VTD_PAGE_SIZE;
 
        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
-               pr_err("IOMMU: can't reserve memory\n");
+               pr_err("Can't reserve memory\n");
                err = -EBUSY;
                goto out;
        }
 
        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
-               pr_err("IOMMU: can't map the region\n");
+               pr_err("Can't map the region\n");
                err = -ENOMEM;
                goto release;
        }
@@ -952,13 +952,13 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                                        iommu->name)) {
-                       pr_err("IOMMU: can't reserve memory\n");
+                       pr_err("Can't reserve memory\n");
                        err = -EBUSY;
                        goto out;
                }
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                if (!iommu->reg) {
-                       pr_err("IOMMU: can't map the region\n");
+                       pr_err("Can't map the region\n");
                        err = -ENOMEM;
                        goto release;
                }
@@ -1014,14 +1014,14 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
                return -ENOMEM;
 
        if (dmar_alloc_seq_id(iommu) < 0) {
-               pr_err("IOMMU: failed to allocate seq_id\n");
+               pr_err("Failed to allocate seq_id\n");
                err = -ENOSPC;
                goto error;
        }
 
        err = map_iommu(iommu, drhd->reg_base_addr);
        if (err) {
-               pr_err("IOMMU: failed to map %s\n", iommu->name);
+               pr_err("Failed to map %s\n", iommu->name);
                goto error_free_seq_id;
        }
 
@@ -1045,8 +1045,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
        iommu->node = -1;
 
        ver = readl(iommu->reg + DMAR_VER_REG);
-       pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
-               iommu->seq_id,
+       pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
+               iommu->name,
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
@@ -1644,7 +1644,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 
        irq = dmar_alloc_hwirq();
        if (irq <= 0) {
-               pr_err("IOMMU: no free vectors\n");
+               pr_err("No free IRQ vectors\n");
                return -EINVAL;
        }
 
@@ -1661,7 +1661,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 
        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
-               pr_err("IOMMU: can't request irq\n");
+               pr_err("Can't request irq\n");
        return ret;
 }
 
index 3e898504a7c45aee654857ab946d64380a1a1d0f..97c41b8ab5d980667667bde15ad1d16105278b65 100644 (file)
 #define DEBUG
 #endif
 
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/pm_runtime.h>
 #include <linux/clk.h>
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
-#include <linux/mm.h>
+#include <linux/io.h>
 #include <linux/iommu.h>
-#include <linux/errno.h>
+#include <linux/interrupt.h>
 #include <linux/list.h>
-#include <linux/memblock.h>
-#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_iommu.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
 
 #include <asm/cacheflush.h>
+#include <asm/dma-iommu.h>
 #include <asm/pgtable.h>
 
 typedef u32 sysmmu_iova_t;
@@ -184,35 +185,50 @@ static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
        "UNKNOWN FAULT"
 };
 
-/* attached to dev.archdata.iommu of the master device */
+/*
+ * This structure is attached to dev.archdata.iommu of the master device
+ * on device add. It contains a list of the SYSMMU controllers defined by
+ * the device tree that are bound to the given master device. It is usually
+ * referenced by the 'owner' pointer.
+ */
 struct exynos_iommu_owner {
-       struct list_head client; /* entry of exynos_iommu_domain.clients */
-       struct device *dev;
-       struct device *sysmmu;
-       struct iommu_domain *domain;
-       void *vmm_data;         /* IO virtual memory manager's data */
-       spinlock_t lock;        /* Lock to preserve consistency of System MMU */
+       struct list_head controllers;   /* list of sysmmu_drvdata.owner_node */
 };
 
+/*
+ * This structure is the Exynos-specific generalization of struct iommu_domain.
+ * It contains the list of SYSMMU controllers from all master devices that
+ * have been attached to this domain, as well as the page tables of the IO
+ * address space it defines. It is usually referenced by the 'domain' pointer.
+ */
 struct exynos_iommu_domain {
-       struct list_head clients; /* list of sysmmu_drvdata.node */
-       sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
-       short *lv2entcnt; /* free lv2 entry counter for each section */
-       spinlock_t lock; /* lock for this structure */
-       spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
+       struct list_head clients; /* list of sysmmu_drvdata.domain_node */
+       sysmmu_pte_t *pgtable;  /* lv1 page table, 16KB */
+       short *lv2entcnt;       /* free lv2 entry counter for each section */
+       spinlock_t lock;        /* lock for modifying the list of clients */
+       spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
        struct iommu_domain domain; /* generic domain data structure */
 };
 
+/*
+ * This structure holds all data of a single SYSMMU controller; this includes
+ * hw resources like registers and clocks, pointers and list nodes connecting
+ * it to all the other structures, internal state, and parameters read from
+ * the device tree. It is usually referenced by the 'data' pointer.
+ */
 struct sysmmu_drvdata {
-       struct device *sysmmu;  /* System MMU's device descriptor */
-       struct device *master;  /* Owner of system MMU */
-       void __iomem *sfrbase;
-       struct clk *clk;
-       struct clk *clk_master;
-       int activations;
-       spinlock_t lock;
-       struct iommu_domain *domain;
-       phys_addr_t pgtable;
+       struct device *sysmmu;          /* SYSMMU controller device */
+       struct device *master;          /* master device (owner) */
+       void __iomem *sfrbase;          /* our registers */
+       struct clk *clk;                /* SYSMMU's clock */
+       struct clk *clk_master;         /* master's device clock */
+       int activations;                /* number of calls to sysmmu_enable */
+       spinlock_t lock;                /* lock for modifying state */
+       struct exynos_iommu_domain *domain; /* domain we belong to */
+       struct list_head domain_node;   /* node for domain clients list */
+       struct list_head owner_node;    /* node for owner controllers list */
+       phys_addr_t pgtable;            /* assigned page table structure */
+       unsigned int version;           /* our version */
 };
 
 static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
@@ -244,11 +260,6 @@ static void sysmmu_unblock(void __iomem *sfrbase)
        __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
 }
 
-static unsigned int __raw_sysmmu_version(struct sysmmu_drvdata *data)
-{
-       return MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
-}
-
 static bool sysmmu_block(void __iomem *sfrbase)
 {
        int i = 120;
@@ -345,7 +356,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
                show_fault_information(dev_name(data->sysmmu),
                                        itype, base, addr);
                if (data->domain)
-                       ret = report_iommu_fault(data->domain,
+                       ret = report_iommu_fault(&data->domain->domain,
                                        data->master, addr, itype);
        }
 
@@ -408,7 +419,7 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data)
        unsigned int cfg = CFG_LRU | CFG_QOS(15);
        unsigned int ver;
 
-       ver = __raw_sysmmu_version(data);
+       ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
        if (MMU_MAJ_VER(ver) == 3) {
                if (MMU_MIN_VER(ver) >= 2) {
                        cfg |= CFG_FLPDCACHE;
@@ -422,6 +433,7 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data)
        }
 
        __raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
+       data->version = ver;
 }
 
 static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
@@ -442,8 +454,8 @@ static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
                clk_disable(data->clk_master);
 }
 
-static int __sysmmu_enable(struct sysmmu_drvdata *data,
-                       phys_addr_t pgtable, struct iommu_domain *domain)
+static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
+                          struct exynos_iommu_domain *domain)
 {
        int ret = 0;
        unsigned long flags;
@@ -470,77 +482,17 @@ static int __sysmmu_enable(struct sysmmu_drvdata *data,
        return ret;
 }
 
-/* __exynos_sysmmu_enable: Enables System MMU
- *
- * returns -error if an error occurred and System MMU is not enabled,
- * 0 if the System MMU has been just enabled and 1 if System MMU was already
- * enabled before.
- */
-static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
-                                 struct iommu_domain *domain)
-{
-       int ret = 0;
-       unsigned long flags;
-       struct exynos_iommu_owner *owner = dev->archdata.iommu;
-       struct sysmmu_drvdata *data;
-
-       BUG_ON(!has_sysmmu(dev));
-
-       spin_lock_irqsave(&owner->lock, flags);
-
-       data = dev_get_drvdata(owner->sysmmu);
-
-       ret = __sysmmu_enable(data, pgtable, domain);
-       if (ret >= 0)
-               data->master = dev;
-
-       spin_unlock_irqrestore(&owner->lock, flags);
-
-       return ret;
-}
-
-int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
-{
-       BUG_ON(!memblock_is_memory(pgtable));
-
-       return __exynos_sysmmu_enable(dev, pgtable, NULL);
-}
-
-static bool exynos_sysmmu_disable(struct device *dev)
-{
-       unsigned long flags;
-       bool disabled = true;
-       struct exynos_iommu_owner *owner = dev->archdata.iommu;
-       struct sysmmu_drvdata *data;
-
-       BUG_ON(!has_sysmmu(dev));
-
-       spin_lock_irqsave(&owner->lock, flags);
-
-       data = dev_get_drvdata(owner->sysmmu);
-
-       disabled = __sysmmu_disable(data);
-       if (disabled)
-               data->master = NULL;
-
-       spin_unlock_irqrestore(&owner->lock, flags);
-
-       return disabled;
-}
-
 static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
                                              sysmmu_iova_t iova)
 {
-       if (__raw_sysmmu_version(data) == MAKE_MMU_VER(3, 3))
+       if (data->version == MAKE_MMU_VER(3, 3))
                __raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
 }
 
-static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
+static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
                                            sysmmu_iova_t iova)
 {
        unsigned long flags;
-       struct exynos_iommu_owner *owner = dev->archdata.iommu;
-       struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu);
 
        if (!IS_ERR(data->clk_master))
                clk_enable(data->clk_master);
@@ -554,14 +506,10 @@ static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
                clk_disable(data->clk_master);
 }
 
-static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
-                                       size_t size)
+static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
+                                       sysmmu_iova_t iova, size_t size)
 {
-       struct exynos_iommu_owner *owner = dev->archdata.iommu;
        unsigned long flags;
-       struct sysmmu_drvdata *data;
-
-       data = dev_get_drvdata(owner->sysmmu);
 
        spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data)) {
@@ -580,7 +528,7 @@ static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
                 * 1MB page can be cached in one of all sets.
                 * 64KB page can be one of 16 consecutive sets.
                 */
-               if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2)
+               if (MMU_MAJ_VER(data->version) == 2)
                        num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
 
                if (sysmmu_block(data->sfrbase)) {
@@ -591,32 +539,8 @@ static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
                if (!IS_ERR(data->clk_master))
                        clk_disable(data->clk_master);
        } else {
-               dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
-                       iova);
-       }
-       spin_unlock_irqrestore(&data->lock, flags);
-}
-
-void exynos_sysmmu_tlb_invalidate(struct device *dev)
-{
-       struct exynos_iommu_owner *owner = dev->archdata.iommu;
-       unsigned long flags;
-       struct sysmmu_drvdata *data;
-
-       data = dev_get_drvdata(owner->sysmmu);
-
-       spin_lock_irqsave(&data->lock, flags);
-       if (is_sysmmu_active(data)) {
-               if (!IS_ERR(data->clk_master))
-                       clk_enable(data->clk_master);
-               if (sysmmu_block(data->sfrbase)) {
-                       __sysmmu_tlb_invalidate(data->sfrbase);
-                       sysmmu_unblock(data->sfrbase);
-               }
-               if (!IS_ERR(data->clk_master))
-                       clk_disable(data->clk_master);
-       } else {
-               dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
+               dev_dbg(data->master,
+                       "disabled. Skipping TLB invalidation @ %#x\n", iova);
        }
        spin_unlock_irqrestore(&data->lock, flags);
 }
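The cached data->version now replaces a register read on every invalidation. Note the bound above: SYSMMU v2 caches each page of a range separately, so the range is invalidated page by page, capped at 64 entries; other versions issue a single invalidation. A sketch of that count (helper name illustrative):

	/* Illustrative: number of TLB invalidation commands issued per range. */
	static unsigned int num_tlb_invalidations(unsigned int major_ver,
						  size_t size)
	{
		if (major_ver == 2)
			return min_t(unsigned int, size / PAGE_SIZE, 64);

		return 1;
	}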
@@ -682,6 +606,36 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int exynos_sysmmu_suspend(struct device *dev)
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+
+       dev_dbg(dev, "suspend\n");
+       if (is_sysmmu_active(data)) {
+               __sysmmu_disable_nocount(data);
+               pm_runtime_put(dev);
+       }
+       return 0;
+}
+
+static int exynos_sysmmu_resume(struct device *dev)
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+
+       dev_dbg(dev, "resume\n");
+       if (is_sysmmu_active(data)) {
+               pm_runtime_get_sync(dev);
+               __sysmmu_enable_nocount(data);
+       }
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops sysmmu_pm_ops = {
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
+};
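SET_LATE_SYSTEM_SLEEP_PM_OPS() wires the two handlers into the late-suspend/early-resume phases, so the SYSMMU is stopped after its master devices have suspended and restarted before they resume. With CONFIG_PM_SLEEP set, the macro expands roughly to:

	/*
	 *	.suspend_late  = exynos_sysmmu_suspend, .resume_early  = exynos_sysmmu_resume,
	 *	.freeze_late   = exynos_sysmmu_suspend, .thaw_early    = exynos_sysmmu_resume,
	 *	.poweroff_late = exynos_sysmmu_suspend, .restore_early = exynos_sysmmu_resume,
	 */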
+
 static const struct of_device_id sysmmu_of_match[] __initconst = {
        { .compatible   = "samsung,exynos-sysmmu", },
        { },
@@ -692,6 +646,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
        .driver = {
                .name           = "exynos-sysmmu",
                .of_match_table = sysmmu_of_match,
+               .pm             = &sysmmu_pm_ops,
        }
 };
 
@@ -704,104 +659,108 @@ static inline void pgtable_flush(void *vastart, void *vaend)
 
 static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 {
-       struct exynos_iommu_domain *exynos_domain;
+       struct exynos_iommu_domain *domain;
        int i;
 
        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;
 
-       exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL);
-       if (!exynos_domain)
+       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+       if (!domain)
                return NULL;
 
-       exynos_domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
-       if (!exynos_domain->pgtable)
+       domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
+       if (!domain->pgtable)
                goto err_pgtable;
 
-       exynos_domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
-       if (!exynos_domain->lv2entcnt)
+       domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+       if (!domain->lv2entcnt)
                goto err_counter;
 
        /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
        for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
-               exynos_domain->pgtable[i + 0] = ZERO_LV2LINK;
-               exynos_domain->pgtable[i + 1] = ZERO_LV2LINK;
-               exynos_domain->pgtable[i + 2] = ZERO_LV2LINK;
-               exynos_domain->pgtable[i + 3] = ZERO_LV2LINK;
-               exynos_domain->pgtable[i + 4] = ZERO_LV2LINK;
-               exynos_domain->pgtable[i + 5] = ZERO_LV2LINK;
-               exynos_domain->pgtable[i + 6] = ZERO_LV2LINK;
-               exynos_domain->pgtable[i + 7] = ZERO_LV2LINK;
+               domain->pgtable[i + 0] = ZERO_LV2LINK;
+               domain->pgtable[i + 1] = ZERO_LV2LINK;
+               domain->pgtable[i + 2] = ZERO_LV2LINK;
+               domain->pgtable[i + 3] = ZERO_LV2LINK;
+               domain->pgtable[i + 4] = ZERO_LV2LINK;
+               domain->pgtable[i + 5] = ZERO_LV2LINK;
+               domain->pgtable[i + 6] = ZERO_LV2LINK;
+               domain->pgtable[i + 7] = ZERO_LV2LINK;
        }
 
-       pgtable_flush(exynos_domain->pgtable, exynos_domain->pgtable + NUM_LV1ENTRIES);
+       pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES);
 
-       spin_lock_init(&exynos_domain->lock);
-       spin_lock_init(&exynos_domain->pgtablelock);
-       INIT_LIST_HEAD(&exynos_domain->clients);
+       spin_lock_init(&domain->lock);
+       spin_lock_init(&domain->pgtablelock);
+       INIT_LIST_HEAD(&domain->clients);
 
-       exynos_domain->domain.geometry.aperture_start = 0;
-       exynos_domain->domain.geometry.aperture_end   = ~0UL;
-       exynos_domain->domain.geometry.force_aperture = true;
+       domain->domain.geometry.aperture_start = 0;
+       domain->domain.geometry.aperture_end   = ~0UL;
+       domain->domain.geometry.force_aperture = true;
 
-       return &exynos_domain->domain;
+       return &domain->domain;
 
 err_counter:
-       free_pages((unsigned long)exynos_domain->pgtable, 2);
+       free_pages((unsigned long)domain->pgtable, 2);
 err_pgtable:
-       kfree(exynos_domain);
+       kfree(domain);
        return NULL;
 }
 
-static void exynos_iommu_domain_free(struct iommu_domain *domain)
+static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
 {
-       struct exynos_iommu_domain *priv = to_exynos_domain(domain);
-       struct exynos_iommu_owner *owner;
+       struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
+       struct sysmmu_drvdata *data, *next;
        unsigned long flags;
        int i;
 
-       WARN_ON(!list_empty(&priv->clients));
+       WARN_ON(!list_empty(&domain->clients));
 
-       spin_lock_irqsave(&priv->lock, flags);
+       spin_lock_irqsave(&domain->lock, flags);
 
-       list_for_each_entry(owner, &priv->clients, client) {
-               while (!exynos_sysmmu_disable(owner->dev))
-                       ; /* until System MMU is actually disabled */
+       list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
+               if (__sysmmu_disable(data))
+                       data->master = NULL;
+               list_del_init(&data->domain_node);
        }
 
-       while (!list_empty(&priv->clients))
-               list_del_init(priv->clients.next);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irqrestore(&domain->lock, flags);
 
        for (i = 0; i < NUM_LV1ENTRIES; i++)
-               if (lv1ent_page(priv->pgtable + i))
+               if (lv1ent_page(domain->pgtable + i))
                        kmem_cache_free(lv2table_kmem_cache,
-                               phys_to_virt(lv2table_base(priv->pgtable + i)));
+                               phys_to_virt(lv2table_base(domain->pgtable + i)));
 
-       free_pages((unsigned long)priv->pgtable, 2);
-       free_pages((unsigned long)priv->lv2entcnt, 1);
-       kfree(priv);
+       free_pages((unsigned long)domain->pgtable, 2);
+       free_pages((unsigned long)domain->lv2entcnt, 1);
+       kfree(domain);
 }
 
-static int exynos_iommu_attach_device(struct iommu_domain *domain,
+static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
                                   struct device *dev)
 {
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
-       struct exynos_iommu_domain *priv = to_exynos_domain(domain);
-       phys_addr_t pagetable = virt_to_phys(priv->pgtable);
+       struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
+       struct sysmmu_drvdata *data;
+       phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        unsigned long flags;
-       int ret;
+       int ret = -ENODEV;
 
-       spin_lock_irqsave(&priv->lock, flags);
+       if (!has_sysmmu(dev))
+               return -ENODEV;
 
-       ret = __exynos_sysmmu_enable(dev, pagetable, domain);
-       if (ret == 0) {
-               list_add_tail(&owner->client, &priv->clients);
-               owner->domain = domain;
-       }
+       list_for_each_entry(data, &owner->controllers, owner_node) {
+               pm_runtime_get_sync(data->sysmmu);
+               ret = __sysmmu_enable(data, pagetable, domain);
+               if (ret >= 0) {
+                       data->master = dev;
 
-       spin_unlock_irqrestore(&priv->lock, flags);
+                       spin_lock_irqsave(&domain->lock, flags);
+                       list_add_tail(&data->domain_node, &domain->clients);
+                       spin_unlock_irqrestore(&domain->lock, flags);
+               }
+       }
 
        if (ret < 0) {
                dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
@@ -815,36 +774,39 @@ static int exynos_iommu_attach_device(struct iommu_domain *domain,
        return ret;
 }
 
-static void exynos_iommu_detach_device(struct iommu_domain *domain,
+static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
                                    struct device *dev)
 {
-       struct exynos_iommu_owner *owner;
-       struct exynos_iommu_domain *priv = to_exynos_domain(domain);
-       phys_addr_t pagetable = virt_to_phys(priv->pgtable);
+       struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
+       phys_addr_t pagetable = virt_to_phys(domain->pgtable);
+       struct sysmmu_drvdata *data, *next;
        unsigned long flags;
+       bool found = false;
 
-       spin_lock_irqsave(&priv->lock, flags);
+       if (!has_sysmmu(dev))
+               return;
 
-       list_for_each_entry(owner, &priv->clients, client) {
-               if (owner == dev->archdata.iommu) {
-                       if (exynos_sysmmu_disable(dev)) {
-                               list_del_init(&owner->client);
-                               owner->domain = NULL;
+       spin_lock_irqsave(&domain->lock, flags);
+       list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
+               if (data->master == dev) {
+                       if (__sysmmu_disable(data)) {
+                               data->master = NULL;
+                               list_del_init(&data->domain_node);
                        }
-                       break;
+                       pm_runtime_put(data->sysmmu);
+                       found = true;
                }
        }
+       spin_unlock_irqrestore(&domain->lock, flags);
 
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       if (owner == dev->archdata.iommu)
+       if (found)
                dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
                                        __func__, &pagetable);
        else
                dev_err(dev, "%s: No IOMMU is attached\n", __func__);
 }
 
-static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
+static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
                sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
 {
        if (lv1ent_section(sent)) {
@@ -862,6 +824,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
                        return ERR_PTR(-ENOMEM);
 
                *sent = mk_lv1ent_page(virt_to_phys(pent));
+               kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
                pgtable_flush(pent, pent + NUM_LV2ENTRIES);
                pgtable_flush(sent, sent + 1);
@@ -884,20 +847,19 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
                 * not currently mapped.
                 */
                if (need_flush_flpd_cache) {
-                       struct exynos_iommu_owner *owner;
+                       struct sysmmu_drvdata *data;
 
-                       spin_lock(&priv->lock);
-                       list_for_each_entry(owner, &priv->clients, client)
-                               sysmmu_tlb_invalidate_flpdcache(
-                                                       owner->dev, iova);
-                       spin_unlock(&priv->lock);
+                       spin_lock(&domain->lock);
+                       list_for_each_entry(data, &domain->clients, domain_node)
+                               sysmmu_tlb_invalidate_flpdcache(data, iova);
+                       spin_unlock(&domain->lock);
                }
        }
 
        return page_entry(sent, iova);
 }
 
-static int lv1set_section(struct exynos_iommu_domain *priv,
+static int lv1set_section(struct exynos_iommu_domain *domain,
                          sysmmu_pte_t *sent, sysmmu_iova_t iova,
                          phys_addr_t paddr, short *pgcnt)
 {
@@ -922,17 +884,17 @@ static int lv1set_section(struct exynos_iommu_domain *priv,
 
        pgtable_flush(sent, sent + 1);
 
-       spin_lock(&priv->lock);
+       spin_lock(&domain->lock);
        if (lv1ent_page_zero(sent)) {
-               struct exynos_iommu_owner *owner;
+               struct sysmmu_drvdata *data;
                /*
                 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
                 * entry by speculative prefetch of SLPD which has no mapping.
                 */
-               list_for_each_entry(owner, &priv->clients, client)
-                       sysmmu_tlb_invalidate_flpdcache(owner->dev, iova);
+               list_for_each_entry(data, &domain->clients, domain_node)
+                       sysmmu_tlb_invalidate_flpdcache(data, iova);
        }
-       spin_unlock(&priv->lock);
+       spin_unlock(&domain->lock);
 
        return 0;
 }
@@ -992,74 +954,75 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
  *   than or equal to 128KiB.
  * - Start address of an I/O virtual region must be aligned by 128KiB.
  */
-static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
-                        phys_addr_t paddr, size_t size, int prot)
+static int exynos_iommu_map(struct iommu_domain *iommu_domain,
+                           unsigned long l_iova, phys_addr_t paddr, size_t size,
+                           int prot)
 {
-       struct exynos_iommu_domain *priv = to_exynos_domain(domain);
+       struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_pte_t *entry;
        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
        unsigned long flags;
        int ret = -ENOMEM;
 
-       BUG_ON(priv->pgtable == NULL);
+       BUG_ON(domain->pgtable == NULL);
 
-       spin_lock_irqsave(&priv->pgtablelock, flags);
+       spin_lock_irqsave(&domain->pgtablelock, flags);
 
-       entry = section_entry(priv->pgtable, iova);
+       entry = section_entry(domain->pgtable, iova);
 
        if (size == SECT_SIZE) {
-               ret = lv1set_section(priv, entry, iova, paddr,
-                                       &priv->lv2entcnt[lv1ent_offset(iova)]);
+               ret = lv1set_section(domain, entry, iova, paddr,
+                                    &domain->lv2entcnt[lv1ent_offset(iova)]);
        } else {
                sysmmu_pte_t *pent;
 
-               pent = alloc_lv2entry(priv, entry, iova,
-                                       &priv->lv2entcnt[lv1ent_offset(iova)]);
+               pent = alloc_lv2entry(domain, entry, iova,
+                                     &domain->lv2entcnt[lv1ent_offset(iova)]);
 
                if (IS_ERR(pent))
                        ret = PTR_ERR(pent);
                else
                        ret = lv2set_page(pent, paddr, size,
-                                       &priv->lv2entcnt[lv1ent_offset(iova)]);
+                                      &domain->lv2entcnt[lv1ent_offset(iova)]);
        }
 
        if (ret)
                pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
                        __func__, ret, size, iova);
 
-       spin_unlock_irqrestore(&priv->pgtablelock, flags);
+       spin_unlock_irqrestore(&domain->pgtablelock, flags);
 
        return ret;
 }
 
-static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
-                                               sysmmu_iova_t iova, size_t size)
+static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
+                                             sysmmu_iova_t iova, size_t size)
 {
-       struct exynos_iommu_owner *owner;
+       struct sysmmu_drvdata *data;
        unsigned long flags;
 
-       spin_lock_irqsave(&priv->lock, flags);
+       spin_lock_irqsave(&domain->lock, flags);
 
-       list_for_each_entry(owner, &priv->clients, client)
-               sysmmu_tlb_invalidate_entry(owner->dev, iova, size);
+       list_for_each_entry(data, &domain->clients, domain_node)
+               sysmmu_tlb_invalidate_entry(data, iova, size);
 
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irqrestore(&domain->lock, flags);
 }
 
-static size_t exynos_iommu_unmap(struct iommu_domain *domain,
-                                       unsigned long l_iova, size_t size)
+static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
+                                unsigned long l_iova, size_t size)
 {
-       struct exynos_iommu_domain *priv = to_exynos_domain(domain);
+       struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
        sysmmu_pte_t *ent;
        size_t err_pgsize;
        unsigned long flags;
 
-       BUG_ON(priv->pgtable == NULL);
+       BUG_ON(domain->pgtable == NULL);
 
-       spin_lock_irqsave(&priv->pgtablelock, flags);
+       spin_lock_irqsave(&domain->pgtablelock, flags);
 
-       ent = section_entry(priv->pgtable, iova);
+       ent = section_entry(domain->pgtable, iova);
 
        if (lv1ent_section(ent)) {
                if (WARN_ON(size < SECT_SIZE)) {
@@ -1093,7 +1056,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
                *ent = 0;
                size = SPAGE_SIZE;
                pgtable_flush(ent, ent + 1);
-               priv->lv2entcnt[lv1ent_offset(iova)] += 1;
+               domain->lv2entcnt[lv1ent_offset(iova)] += 1;
                goto done;
        }
 
@@ -1107,15 +1070,15 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
        pgtable_flush(ent, ent + SPAGES_PER_LPAGE);
 
        size = LPAGE_SIZE;
-       priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
+       domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
 done:
-       spin_unlock_irqrestore(&priv->pgtablelock, flags);
+       spin_unlock_irqrestore(&domain->pgtablelock, flags);
 
-       exynos_iommu_tlb_invalidate_entry(priv, iova, size);
+       exynos_iommu_tlb_invalidate_entry(domain, iova, size);
 
        return size;
 err:
-       spin_unlock_irqrestore(&priv->pgtablelock, flags);
+       spin_unlock_irqrestore(&domain->pgtablelock, flags);
 
        pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
                __func__, size, iova, err_pgsize);
@@ -1123,17 +1086,17 @@ err:
        return 0;
 }
 
-static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
+static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
                                          dma_addr_t iova)
 {
-       struct exynos_iommu_domain *priv = to_exynos_domain(domain);
+       struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_pte_t *entry;
        unsigned long flags;
        phys_addr_t phys = 0;
 
-       spin_lock_irqsave(&priv->pgtablelock, flags);
+       spin_lock_irqsave(&domain->pgtablelock, flags);
 
-       entry = section_entry(priv->pgtable, iova);
+       entry = section_entry(domain->pgtable, iova);
 
        if (lv1ent_section(entry)) {
                phys = section_phys(entry) + section_offs(iova);
@@ -1146,7 +1109,7 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
                        phys = spage_phys(entry) + spage_offs(iova);
        }
 
-       spin_unlock_irqrestore(&priv->pgtablelock, flags);
+       spin_unlock_irqrestore(&domain->pgtablelock, flags);
 
        return phys;
 }
@@ -1156,6 +1119,9 @@ static int exynos_iommu_add_device(struct device *dev)
        struct iommu_group *group;
        int ret;
 
+       if (!has_sysmmu(dev))
+               return -ENODEV;
+
        group = iommu_group_get(dev);
 
        if (!group) {
@@ -1174,10 +1140,40 @@ static int exynos_iommu_add_device(struct device *dev)
 
 static void exynos_iommu_remove_device(struct device *dev)
 {
+       if (!has_sysmmu(dev))
+               return;
+
        iommu_group_remove_device(dev);
 }
 
-static const struct iommu_ops exynos_iommu_ops = {
+static int exynos_iommu_of_xlate(struct device *dev,
+                                struct of_phandle_args *spec)
+{
+       struct exynos_iommu_owner *owner = dev->archdata.iommu;
+       struct platform_device *sysmmu = of_find_device_by_node(spec->np);
+       struct sysmmu_drvdata *data;
+
+       if (!sysmmu)
+               return -ENODEV;
+
+       data = platform_get_drvdata(sysmmu);
+       if (!data)
+               return -ENODEV;
+
+       if (!owner) {
+               owner = kzalloc(sizeof(*owner), GFP_KERNEL);
+               if (!owner)
+                       return -ENOMEM;
+
+               INIT_LIST_HEAD(&owner->controllers);
+               dev->archdata.iommu = owner;
+       }
+
+       list_add_tail(&data->owner_node, &owner->controllers);
+       return 0;
+}
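
As a usage illustration (not from the patch): each "iommus" phandle in
a master device's DT node, per the generic IOMMU binding, produces one
of_xlate() call, so a master fronted by two System MMUs ends up with
both sysmmu_drvdata instances linked on owner->controllers.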
+
+static struct iommu_ops exynos_iommu_ops = {
        .domain_alloc = exynos_iommu_domain_alloc,
        .domain_free = exynos_iommu_domain_free,
        .attach_dev = exynos_iommu_attach_device,
@@ -1189,19 +1185,15 @@ static const struct iommu_ops exynos_iommu_ops = {
        .add_device = exynos_iommu_add_device,
        .remove_device = exynos_iommu_remove_device,
        .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
+       .of_xlate = exynos_iommu_of_xlate,
 };
 
+static bool init_done;
+
 static int __init exynos_iommu_init(void)
 {
-       struct device_node *np;
        int ret;
 
-       np = of_find_matching_node(NULL, sysmmu_of_match);
-       if (!np)
-               return 0;
-
-       of_node_put(np);
-
        lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
                                LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
        if (!lv2table_kmem_cache) {
@@ -1230,6 +1222,8 @@ static int __init exynos_iommu_init(void)
                goto err_set_iommu;
        }
 
+       init_done = true;
+
        return 0;
 err_set_iommu:
        kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
@@ -1239,4 +1233,21 @@ err_reg_driver:
        kmem_cache_destroy(lv2table_kmem_cache);
        return ret;
 }
-subsys_initcall(exynos_iommu_init);
+
+static int __init exynos_iommu_of_setup(struct device_node *np)
+{
+       struct platform_device *pdev;
+
+       if (!init_done)
+               exynos_iommu_init();
+
+       pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
+       if (!pdev)
+               return -ENODEV;
+
+       of_iommu_set_ops(np, &exynos_iommu_ops);
+       return 0;
+}
+
+IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
+                exynos_iommu_of_setup);
index 68d43beccb7e560f845ad49b8ae7d9e38872fcf7..a98a7b27aca1dec2cb2f53319df8a49abcf8e645 100644 (file)
  *          Shaohua Li <shaohua.li@intel.com>,
  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
  *          Fenghua Yu <fenghua.yu@intel.com>
+ *          Joerg Roedel <jroedel@suse.de>
  */
 
+#define pr_fmt(fmt)     "DMAR: " fmt
+
 #include <linux/init.h>
 #include <linux/bitmap.h>
 #include <linux/debugfs.h>
@@ -40,6 +43,7 @@
 #include <linux/pci-ats.h>
 #include <linux/memblock.h>
 #include <linux/dma-contiguous.h>
+#include <linux/crash_dump.h>
 #include <asm/irq_remapping.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
@@ -190,7 +194,29 @@ struct root_entry {
 };
 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
 
+/*
+ * Take a root_entry and return the Lower Context Table Pointer (LCTP)
+ * if marked present.
+ */
+static phys_addr_t root_entry_lctp(struct root_entry *re)
+{
+       if (!(re->lo & 1))
+               return 0;
+
+       return re->lo & VTD_PAGE_MASK;
+}
+
+/*
+ * Take a root_entry and return the Upper Context Table Pointer (UCTP)
+ * if marked present.
+ */
+static phys_addr_t root_entry_uctp(struct root_entry *re)
+{
+       if (!(re->hi & 1))
+               return 0;
+
+       return re->hi & VTD_PAGE_MASK;
+}
 /*
  * low 64 bits:
  * 0: present
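
Not in the patch: a tiny sanity sketch of the two helpers added above,
using a made-up root entry whose lower context table sits at physical
address 0x12345000.

static void __maybe_unused root_entry_lctp_example(void)
{
	struct root_entry re = { .lo = 0x12345000ULL | 1, .hi = 0 };

	WARN_ON(root_entry_lctp(&re) != 0x12345000); /* bit 0 masked off */
	WARN_ON(root_entry_uctp(&re) != 0);          /* hi present bit clear */
}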
@@ -207,10 +233,38 @@ struct context_entry {
        u64 hi;
 };
 
-static inline bool context_present(struct context_entry *context)
+static inline void context_clear_pasid_enable(struct context_entry *context)
+{
+       context->lo &= ~(1ULL << 11);
+}
+
+static inline bool context_pasid_enabled(struct context_entry *context)
+{
+       return !!(context->lo & (1ULL << 11));
+}
+
+static inline void context_set_copied(struct context_entry *context)
+{
+       context->hi |= (1ULL << 3);
+}
+
+static inline bool context_copied(struct context_entry *context)
+{
+       return !!(context->hi & (1ULL << 3));
+}
+
+static inline bool __context_present(struct context_entry *context)
 {
        return (context->lo & 1);
 }
+
+static inline bool context_present(struct context_entry *context)
+{
+       return context_pasid_enabled(context) ?
+            __context_present(context) :
+            __context_present(context) && !context_copied(context);
+}
+
 static inline void context_set_present(struct context_entry *context)
 {
        context->lo |= 1;
@@ -247,6 +301,11 @@ static inline void context_set_domain_id(struct context_entry *context,
        context->hi |= (value & ((1 << 16) - 1)) << 8;
 }
 
+static inline int context_domain_id(struct context_entry *c)
+{
+       return (c->hi >> 8) & 0xffff;
+}
+
 static inline void context_clear_entry(struct context_entry *context)
 {
        context->lo = 0;
@@ -422,6 +481,14 @@ static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
+static int intel_iommu_ecs = 1;
+
+/* We only actually use ECS when PASID support (on the new bit 40)
+ * is also advertised. Some early implementations (the ones with
+ * PASID support on bit 28) have issues even when we *only* use
+ * extended root/context tables. */
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+                           ecap_pasid(iommu->ecap))
 
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -432,6 +499,25 @@ static LIST_HEAD(device_domain_list);
 
 static const struct iommu_ops intel_iommu_ops;
 
+static bool translation_pre_enabled(struct intel_iommu *iommu)
+{
+       return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
+}
+
+static void clear_translation_pre_enabled(struct intel_iommu *iommu)
+{
+       iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
+}
+
+static void init_translation_status(struct intel_iommu *iommu)
+{
+       u32 gsts;
+
+       gsts = readl(iommu->reg + DMAR_GSTS_REG);
+       if (gsts & DMA_GSTS_TES)
+               iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
+}
+
 /* Convert generic 'struct iommu_domain' to private struct dmar_domain */
 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
 {
@@ -445,26 +531,26 @@ static int __init intel_iommu_setup(char *str)
        while (*str) {
                if (!strncmp(str, "on", 2)) {
                        dmar_disabled = 0;
-                       printk(KERN_INFO "Intel-IOMMU: enabled\n");
+                       pr_info("IOMMU enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
-                       printk(KERN_INFO "Intel-IOMMU: disabled\n");
+                       pr_info("IOMMU disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
-                       printk(KERN_INFO
-                               "Intel-IOMMU: disable GFX device mapping\n");
+                       pr_info("Disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
-                       printk(KERN_INFO
-                               "Intel-IOMMU: Forcing DAC for PCI devices\n");
+                       pr_info("Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
                } else if (!strncmp(str, "strict", 6)) {
-                       printk(KERN_INFO
-                               "Intel-IOMMU: disable batched IOTLB flush\n");
+                       pr_info("Disable batched IOTLB flush\n");
                        intel_iommu_strict = 1;
                } else if (!strncmp(str, "sp_off", 6)) {
-                       printk(KERN_INFO
-                               "Intel-IOMMU: disable supported super page\n");
+                       pr_info("Disable supported super page\n");
                        intel_iommu_superpage = 0;
+               } else if (!strncmp(str, "ecs_off", 7)) {
+                       pr_info("Disable extended context table support\n");
+                       intel_iommu_ecs = 0;
                }
 
                str += strcspn(str, ",");
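
For illustration (not part of the patch): as the strcspn() loop shows,
options are comma separated, so booting with "intel_iommu=on,ecs_off"
enables the IOMMU while turning the new extended context table support
back off.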
@@ -669,7 +755,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
        struct context_entry *context;
        u64 *entry;
 
-       if (ecap_ecs(iommu->ecap)) {
+       if (ecs_enabled(iommu)) {
                if (devfn >= 0x80) {
                        devfn -= 0x80;
                        entry = &root->hi;
@@ -696,6 +782,11 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
        return &context[devfn];
 }
 
+static int iommu_dummy(struct device *dev)
+{
+       return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+}
+
 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 {
        struct dmar_drhd_unit *drhd = NULL;
@@ -705,6 +796,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
        u16 segment = 0;
        int i;
 
+       if (iommu_dummy(dev))
+               return NULL;
+
        if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                segment = pci_domain_nr(pdev->bus);
@@ -798,7 +892,7 @@ static void free_context_table(struct intel_iommu *iommu)
                if (context)
                        free_pgtable_page(context);
 
-               if (!ecap_ecs(iommu->ecap))
+               if (!ecs_enabled(iommu))
                        continue;
 
                context = iommu_context_addr(iommu, i, 0x80, 0);
@@ -1112,7 +1206,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 
        root = (struct root_entry *)alloc_pgtable_page(iommu->node);
        if (!root) {
-               pr_err("IOMMU: allocating root entry for %s failed\n",
+               pr_err("Allocating root entry for %s failed\n",
                        iommu->name);
                return -ENOMEM;
        }
@@ -1133,7 +1227,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
        unsigned long flag;
 
        addr = virt_to_phys(iommu->root_entry);
-       if (ecap_ecs(iommu->ecap))
+       if (ecs_enabled(iommu))
                addr |= DMA_RTADDR_RTT;
 
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
@@ -1250,9 +1344,9 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 
        /* check IOTLB invalidation granularity */
        if (DMA_TLB_IAIG(val) == 0)
-               printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
+               pr_err("Flush IOTLB failed\n");
        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
-               pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
+               pr_debug("TLB flush request %Lx, actual %Lx\n",
                        (unsigned long long)DMA_TLB_IIRG(type),
                        (unsigned long long)DMA_TLB_IAIG(val));
 }
@@ -1423,8 +1517,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
        unsigned long nlongs;
 
        ndomains = cap_ndoms(iommu->cap);
-       pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
-                iommu->seq_id, ndomains);
+       pr_debug("%s: Number of Domains supported <%ld>\n",
+                iommu->name, ndomains);
        nlongs = BITS_TO_LONGS(ndomains);
 
        spin_lock_init(&iommu->lock);
@@ -1434,15 +1528,15 @@ static int iommu_init_domains(struct intel_iommu *iommu)
         */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
-               pr_err("IOMMU%d: allocating domain id array failed\n",
-                      iommu->seq_id);
+               pr_err("%s: Allocating domain id array failed\n",
+                      iommu->name);
                return -ENOMEM;
        }
        iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
                        GFP_KERNEL);
        if (!iommu->domains) {
-               pr_err("IOMMU%d: allocating domain array failed\n",
-                      iommu->seq_id);
+               pr_err("%s: Allocating domain array failed\n",
+                      iommu->name);
                kfree(iommu->domain_ids);
                iommu->domain_ids = NULL;
                return -ENOMEM;
@@ -1547,7 +1641,7 @@ static int iommu_attach_domain(struct dmar_domain *domain,
        num = __iommu_attach_domain(domain, iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
        if (num < 0)
-               pr_err("IOMMU: no free domain ids\n");
+               pr_err("%s: No free domain ids\n", iommu->name);
 
        return num;
 }
@@ -1639,7 +1733,7 @@ static int dmar_init_reserved_ranges(void)
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                IOVA_PFN(IOAPIC_RANGE_END));
        if (!iova) {
-               printk(KERN_ERR "Reserve IOAPIC range failed\n");
+               pr_err("Reserve IOAPIC range failed\n");
                return -ENODEV;
        }
 
@@ -1655,7 +1749,7 @@ static int dmar_init_reserved_ranges(void)
                                            IOVA_PFN(r->start),
                                            IOVA_PFN(r->end));
                        if (!iova) {
-                               printk(KERN_ERR "Reserve iova failed\n");
+                               pr_err("Reserve iova failed\n");
                                return -ENODEV;
                        }
                }
@@ -1702,7 +1796,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
        sagaw = cap_sagaw(iommu->cap);
        if (!test_bit(agaw, &sagaw)) {
                /* hardware doesn't support it, choose a bigger one */
-               pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
+               pr_debug("Hardware doesn't support agaw %d\n", agaw);
                agaw = find_next_bit(&sagaw, 5, agaw);
                if (agaw >= 5)
                        return -ENODEV;
@@ -1795,6 +1889,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                return 0;
        }
 
+       context_clear_entry(context);
+
        id = domain->id;
        pgd = domain->pgd;
 
@@ -1803,7 +1899,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                        id = iommu_attach_vm_domain(domain, iommu);
                        if (id < 0) {
                                spin_unlock_irqrestore(&iommu->lock, flags);
-                               pr_err("IOMMU: no free domain ids\n");
+                               pr_err("%s: No free domain ids\n", iommu->name);
                                return -EFAULT;
                        }
                }
@@ -2030,8 +2126,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
                if (tmp) {
                        static int dumps = 5;
-                       printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
-                              iov_pfn, tmp, (unsigned long long)pteval);
+                       pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
+                               iov_pfn, tmp, (unsigned long long)pteval);
                        if (dumps) {
                                dumps--;
                                debug_dma_dump_mappings(NULL);
@@ -2303,7 +2399,7 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
 
        if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
                          dma_to_mm_pfn(last_vpfn))) {
-               printk(KERN_ERR "IOMMU: reserve iova failed\n");
+               pr_err("Reserving iova failed\n");
                return -ENOMEM;
        }
 
@@ -2336,15 +2432,14 @@ static int iommu_prepare_identity_map(struct device *dev,
           range which is reserved in E820, so which didn't get set
           up to start with in si_domain */
        if (domain == si_domain && hw_pass_through) {
-               printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
-                      dev_name(dev), start, end);
+               pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+                       dev_name(dev), start, end);
                return 0;
        }
 
-       printk(KERN_INFO
-              "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
-              dev_name(dev), start, end);
-       
+       pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+               dev_name(dev), start, end);
+
        if (end < start) {
                WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
                        "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
@@ -2401,12 +2496,11 @@ static inline void iommu_prepare_isa(void)
        if (!pdev)
                return;
 
-       printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
+       pr_info("Prepare 0-16MiB unity mapping for LPC\n");
        ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
 
        if (ret)
-               printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
-                      "floppy might not work\n");
+               pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
 
        pci_dev_put(pdev);
 }
@@ -2450,7 +2544,7 @@ static int __init si_domain_init(int hw)
                return -EFAULT;
        }
 
-       pr_debug("IOMMU: identity mapping domain is domain %d\n",
+       pr_debug("Identity mapping domain is domain %d\n",
                 si_domain->id);
 
        if (hw)
@@ -2650,8 +2744,8 @@ static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw
                                  hw ? CONTEXT_TT_PASS_THROUGH :
                                       CONTEXT_TT_MULTI_LEVEL);
        if (!ret)
-               pr_info("IOMMU: %s identity mapping for device %s\n",
-                       hw ? "hardware" : "software", dev_name(dev));
+               pr_info("%s identity mapping for device %s\n",
+                       hw ? "Hardware" : "Software", dev_name(dev));
        else if (ret == -ENODEV)
                /* device not associated with an iommu */
                ret = 0;
@@ -2669,10 +2763,6 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
        int i;
        int ret = 0;
 
-       ret = si_domain_init(hw);
-       if (ret)
-               return -EFAULT;
-
        for_each_pci_dev(pdev) {
                ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
                if (ret)
@@ -2686,7 +2776,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
 
                        if (dev->bus != &acpi_bus_type)
                                continue;
-                               
+
                        adev = to_acpi_device(dev);
                        mutex_lock(&adev->physical_node_lock);
                        list_for_each_entry(pn, &adev->physical_node_list, node) {
@@ -2728,19 +2818,200 @@ static void intel_iommu_init_qi(struct intel_iommu *iommu)
                 */
                iommu->flush.flush_context = __iommu_flush_context;
                iommu->flush.flush_iotlb = __iommu_flush_iotlb;
-               pr_info("IOMMU: %s using Register based invalidation\n",
+               pr_info("%s: Using Register based invalidation\n",
                        iommu->name);
        } else {
                iommu->flush.flush_context = qi_flush_context;
                iommu->flush.flush_iotlb = qi_flush_iotlb;
-               pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
+               pr_info("%s: Using Queued invalidation\n", iommu->name);
        }
 }
 
+static int copy_context_table(struct intel_iommu *iommu,
+                             struct root_entry *old_re,
+                             struct context_entry **tbl,
+                             int bus, bool ext)
+{
+       struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
+       int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
+       phys_addr_t old_ce_phys;
+
+       tbl_idx = ext ? bus * 2 : bus;
+
+       for (devfn = 0; devfn < 256; devfn++) {
+               /* First calculate the correct index */
+               idx = (ext ? devfn * 2 : devfn) % 256;
+
+               if (idx == 0) {
+                       /* First save what we may have and clean up */
+                       if (new_ce) {
+                               tbl[tbl_idx] = new_ce;
+                               __iommu_flush_cache(iommu, new_ce,
+                                                   VTD_PAGE_SIZE);
+                               pos = 1;
+                       }
+
+                       if (old_ce)
+                               iounmap(old_ce);
+
+                       ret = 0;
+                       if (devfn < 0x80)
+                               old_ce_phys = root_entry_lctp(old_re);
+                       else
+                               old_ce_phys = root_entry_uctp(old_re);
+
+                       if (!old_ce_phys) {
+                               if (ext && devfn == 0) {
+                                       /* No LCTP, try UCTP */
+                                       devfn = 0x7f;
+                                       continue;
+                               } else {
+                                       goto out;
+                               }
+                       }
+
+                       ret = -ENOMEM;
+                       old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
+                       if (!old_ce)
+                               goto out;
+
+                       new_ce = alloc_pgtable_page(iommu->node);
+                       if (!new_ce)
+                               goto out_unmap;
+
+                       ret = 0;
+               }
+
+               /* Now copy the context entry */
+               ce = old_ce[idx];
+
+               if (!__context_present(&ce))
+                       continue;
+
+               did = context_domain_id(&ce);
+               if (did >= 0 && did < cap_ndoms(iommu->cap))
+                       set_bit(did, iommu->domain_ids);
+
+               /*
+                * We need a marker for copied context entries. This
+                * marker needs to work for the old format as well as
+                * for extended context entries.
+                *
+                * Bit 67 of the context entry is used. In the old
+                * format this bit is available to software, in the
+                * extended format it is the PGE bit, but PGE is ignored
+                * by HW if PASIDs are disabled (and thus still
+                * available).
+                *
+                * So disable PASIDs first and then mark the entry
+                * copied. This means that we don't copy PASID
+                * translations from the old kernel, but this is fine as
+                * faults there are not fatal.
+                */
+               context_clear_pasid_enable(&ce);
+               context_set_copied(&ce);
+
+               new_ce[idx] = ce;
+       }
+
+       tbl[tbl_idx + pos] = new_ce;
+
+       __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
+
+out_unmap:
+       iounmap(old_ce);
+
+out:
+       return ret;
+}
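
Not in the patch: a sketch of the marker round trip described in the
comment above, on a made-up old-kernel entry with the present bit and
the PASID-enable bit (lo bit 11) set.

static void __maybe_unused copied_marker_example(void)
{
	struct context_entry ce = { .lo = 1 | (1ULL << 11), .hi = 0 };

	context_clear_pasid_enable(&ce);
	context_set_copied(&ce);

	/* The raw present bit survives, but context_present() now reports
	 * false, so the new kernel will install a fresh entry when the
	 * device is first attached instead of mistaking the inherited one
	 * for its own. */
	WARN_ON(!__context_present(&ce));
	WARN_ON(context_present(&ce));
}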
+
+static int copy_translation_tables(struct intel_iommu *iommu)
+{
+       struct context_entry **ctxt_tbls;
+       struct root_entry *old_rt;
+       phys_addr_t old_rt_phys;
+       int ctxt_table_entries;
+       unsigned long flags;
+       u64 rtaddr_reg;
+       int bus, ret;
+       bool new_ext, ext;
+
+       rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
+       ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
+       new_ext    = !!ecap_ecs(iommu->ecap);
+
+       /*
+        * The RTT bit can only be changed when translation is disabled,
+        * but disabling translation means to open a window for data
+        * corruption. So bail out and don't copy anything if we would
+        * have to change the bit.
+        */
+       if (new_ext != ext)
+               return -EINVAL;
+
+       old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
+       if (!old_rt_phys)
+               return -EINVAL;
+
+       old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
+       if (!old_rt)
+               return -ENOMEM;
+
+       /* This is too big for the stack - allocate it from slab */
+       ctxt_table_entries = ext ? 512 : 256;
+       ret = -ENOMEM;
+       ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
+       if (!ctxt_tbls)
+               goto out_unmap;
+
+       for (bus = 0; bus < 256; bus++) {
+               ret = copy_context_table(iommu, &old_rt[bus],
+                                        ctxt_tbls, bus, ext);
+               if (ret) {
+                       pr_err("%s: Failed to copy context table for bus %d\n",
+                               iommu->name, bus);
+                       continue;
+               }
+       }
+
+       spin_lock_irqsave(&iommu->lock, flags);
+
+       /* Context tables are copied, now write them to the root_entry table */
+       for (bus = 0; bus < 256; bus++) {
+               int idx = ext ? bus * 2 : bus;
+               u64 val;
+
+               if (ctxt_tbls[idx]) {
+                       val = virt_to_phys(ctxt_tbls[idx]) | 1;
+                       iommu->root_entry[bus].lo = val;
+               }
+
+               if (!ext || !ctxt_tbls[idx + 1])
+                       continue;
+
+               val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
+               iommu->root_entry[bus].hi = val;
+       }
+
+       spin_unlock_irqrestore(&iommu->lock, flags);
+
+       kfree(ctxt_tbls);
+
+       __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
+
+       ret = 0;
+
+out_unmap:
+       iounmap(old_rt);
+
+       return ret;
+}
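
Not in the patch: a hypothetical helper spelling out the indexing that
copy_context_table() and the write-back loop above rely on. In extended
mode each bus owns two context tables: devfns 0x00-0x7f sit behind the
root entry's lo pointer, devfns 0x80-0xff behind hi.

static inline int ctxt_tbl_index(int bus, int devfn, bool ext)
{
	if (!ext)
		return bus;
	return bus * 2 + (devfn >= 0x80);
}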
+
 static int __init init_dmars(void)
 {
        struct dmar_drhd_unit *drhd;
        struct dmar_rmrr_unit *rmrr;
+       bool copied_tables = false;
        struct device *dev;
        struct intel_iommu *iommu;
        int i, ret;
@@ -2761,8 +3032,7 @@ static int __init init_dmars(void)
                        g_num_of_iommus++;
                        continue;
                }
-               printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
-                         DMAR_UNITS_SUPPORTED);
+               pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
        }
 
        /* Preallocate enough resources for IOMMU hot-addition */
@@ -2772,7 +3042,7 @@ static int __init init_dmars(void)
        g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
                        GFP_KERNEL);
        if (!g_iommus) {
-               printk(KERN_ERR "Allocating global iommu array failed\n");
+               pr_err("Allocating global iommu array failed\n");
                ret = -ENOMEM;
                goto error;
        }
@@ -2787,10 +3057,21 @@ static int __init init_dmars(void)
        for_each_active_iommu(iommu, drhd) {
                g_iommus[iommu->seq_id] = iommu;
 
+               intel_iommu_init_qi(iommu);
+
                ret = iommu_init_domains(iommu);
                if (ret)
                        goto free_iommu;
 
+               init_translation_status(iommu);
+
+               if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
+                       iommu_disable_translation(iommu);
+                       clear_translation_pre_enabled(iommu);
+                       pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
+                               iommu->name);
+               }
+
                /*
                 * TBD:
                 * we could share the same root & context tables
@@ -2799,13 +3080,41 @@ static int __init init_dmars(void)
                ret = iommu_alloc_root_entry(iommu);
                if (ret)
                        goto free_iommu;
+
+               if (translation_pre_enabled(iommu)) {
+                       pr_info("Translation already enabled - trying to copy translation structures\n");
+
+                       ret = copy_translation_tables(iommu);
+                       if (ret) {
+                               /*
+                                * We found the IOMMU with translation
+                                * enabled - but failed to copy over the
+                                * old root-entry table. Try to proceed
+                                * by disabling translation now and
+                                * allocating a clean root-entry table.
+                                * This might cause DMAR faults, but
+                                * probably the dump will still succeed.
+                                */
+                               pr_err("Failed to copy translation tables from previous kernel for %s\n",
+                                      iommu->name);
+                               iommu_disable_translation(iommu);
+                               clear_translation_pre_enabled(iommu);
+                       } else {
+                               pr_info("Copied translation tables from previous kernel for %s\n",
+                                       iommu->name);
+                               copied_tables = true;
+                       }
+               }
+
+               iommu_flush_write_buffer(iommu);
+               iommu_set_root_entry(iommu);
+               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+
                if (!ecap_pass_through(iommu->ecap))
                        hw_pass_through = 0;
        }
 
-       for_each_active_iommu(iommu, drhd)
-               intel_iommu_init_qi(iommu);
-
        if (iommu_pass_through)
                iommu_identity_mapping |= IDENTMAP_ALL;
 
@@ -2813,8 +3122,23 @@ static int __init init_dmars(void)
        iommu_identity_mapping |= IDENTMAP_GFX;
 #endif
 
+       if (iommu_identity_mapping) {
+               ret = si_domain_init(hw_pass_through);
+               if (ret)
+                       goto free_iommu;
+       }
+
        check_tylersburg_isoch();
 
+       /*
+        * If we copied translations from a previous kernel in the kdump
+        * case, we can not assign the devices to domains now, as that
+        * would eliminate the old mappings. So skip this part and defer
+        * the assignment to device driver initialization time.
+        */
+       if (copied_tables)
+               goto domains_done;
+
        /*
         * If pass through is not set or not enabled, setup context entries for
         * identity mappings for rmrr, gfx, and isa and may fall back to static
@@ -2823,7 +3147,7 @@ static int __init init_dmars(void)
        if (iommu_identity_mapping) {
                ret = iommu_prepare_static_identity_mapping(hw_pass_through);
                if (ret) {
-                       printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+                       pr_crit("Failed to setup IOMMU pass-through\n");
                        goto free_iommu;
                }
        }
@@ -2841,20 +3165,21 @@ static int __init init_dmars(void)
         *    endfor
         * endfor
         */
-       printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+       pr_info("Setting RMRR:\n");
        for_each_rmrr_units(rmrr) {
                /* some BIOS lists non-exist devices in DMAR table. */
                for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
                                          i, dev) {
                        ret = iommu_prepare_rmrr_dev(rmrr, dev);
                        if (ret)
-                               printk(KERN_ERR
-                                      "IOMMU: mapping reserved region failed\n");
+                               pr_err("Mapping reserved region failed\n");
                }
        }
 
        iommu_prepare_isa();
 
+domains_done:
+
        /*
         * for each drhd
         *   enable fault log
@@ -2879,11 +3204,9 @@ static int __init init_dmars(void)
                if (ret)
                        goto free_iommu;
 
-               iommu_set_root_entry(iommu);
+               if (!translation_pre_enabled(iommu))
+                       iommu_enable_translation(iommu);
 
-               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
-               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-               iommu_enable_translation(iommu);
                iommu_disable_protect_mem_regions(iommu);
        }
 
@@ -2924,7 +3247,7 @@ static struct iova *intel_alloc_iova(struct device *dev,
        }
        iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
        if (unlikely(!iova)) {
-               printk(KERN_ERR "Allocating %ld-page iova for %s failed",
+               pr_err("Allocating %ld-page iova for %s failed",
                       nrpages, dev_name(dev));
                return NULL;
        }
@@ -2939,7 +3262,7 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
 
        domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain) {
-               printk(KERN_ERR "Allocating domain for %s failed",
+               pr_err("Allocating domain for %s failed\n",
                       dev_name(dev));
                return NULL;
        }
@@ -2948,7 +3271,7 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
        if (unlikely(!domain_context_mapped(dev))) {
                ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
                if (ret) {
-                       printk(KERN_ERR "Domain context map for %s failed",
+                       pr_err("Domain context map for %s failed\n",
                               dev_name(dev));
                        return NULL;
                }
@@ -2969,11 +3292,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
        return __get_valid_domain_for_dev(dev);
 }
 
-static int iommu_dummy(struct device *dev)
-{
-       return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
 /* Check if the dev needs to go through non-identity map and unmap process.*/
 static int iommu_no_mapping(struct device *dev)
 {
@@ -2995,8 +3313,8 @@ static int iommu_no_mapping(struct device *dev)
                         * to non-identity mapping.
                         */
                        domain_remove_one_dev_info(si_domain, dev);
-                       printk(KERN_INFO "32bit %s uses non-identity mapping\n",
-                              dev_name(dev));
+                       pr_info("32bit %s uses non-identity mapping\n",
+                               dev_name(dev));
                        return 0;
                }
        } else {
@@ -3011,8 +3329,8 @@ static int iommu_no_mapping(struct device *dev)
                                                  CONTEXT_TT_PASS_THROUGH :
                                                  CONTEXT_TT_MULTI_LEVEL);
                        if (!ret) {
-                               printk(KERN_INFO "64bit %s uses identity mapping\n",
-                                      dev_name(dev));
+                               pr_info("64bit %s uses identity mapping\n",
+                                       dev_name(dev));
                                return 1;
                        }
                }
@@ -3081,7 +3399,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 error:
        if (iova)
                __free_iova(&domain->iovad, iova);
-       printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
+       pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
                dev_name(dev), size, (unsigned long long)paddr, dir);
        return 0;
 }
@@ -3396,7 +3714,7 @@ static inline int iommu_domain_cache_init(void)
 
                                         NULL);
        if (!iommu_domain_cache) {
-               printk(KERN_ERR "Couldn't create iommu_domain cache\n");
+               pr_err("Couldn't create iommu_domain cache\n");
                ret = -ENOMEM;
        }
 
@@ -3413,7 +3731,7 @@ static inline int iommu_devinfo_cache_init(void)
                                         SLAB_HWCACHE_ALIGN,
                                         NULL);
        if (!iommu_devinfo_cache) {
-               printk(KERN_ERR "Couldn't create devinfo cache\n");
+               pr_err("Couldn't create devinfo cache\n");
                ret = -ENOMEM;
        }
 
@@ -3790,19 +4108,19 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
                return 0;
 
        if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
-               pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
+               pr_warn("%s: Doesn't support hardware pass through.\n",
                        iommu->name);
                return -ENXIO;
        }
        if (!ecap_sc_support(iommu->ecap) &&
            domain_update_iommu_snooping(iommu)) {
-               pr_warn("IOMMU: %s doesn't support snooping.\n",
+               pr_warn("%s: Doesn't support snooping.\n",
                        iommu->name);
                return -ENXIO;
        }
        sp = domain_update_iommu_superpage(iommu) - 1;
        if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
-               pr_warn("IOMMU: %s doesn't support large page.\n",
+               pr_warn("%s: Doesn't support large page.\n",
                        iommu->name);
                return -ENXIO;
        }
@@ -4033,7 +4351,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
                start = mhp->start_pfn << PAGE_SHIFT;
                end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
                if (iommu_domain_identity_map(si_domain, start, end)) {
-                       pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
+                       pr_warn("Failed to build identity map for [%llx-%llx]\n",
                                start, end);
                        return NOTIFY_BAD;
                }
@@ -4051,7 +4369,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 
                        iova = find_iova(&si_domain->iovad, start_vpfn);
                        if (iova == NULL) {
-                               pr_debug("dmar: failed get IOVA for PFN %lx\n",
+                               pr_debug("Failed get IOVA for PFN %lx\n",
                                         start_vpfn);
                                break;
                        }
@@ -4059,7 +4377,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
                        iova = split_and_remove_iova(&si_domain->iovad, iova,
                                                     start_vpfn, last_vpfn);
                        if (iova == NULL) {
-                               pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
+                               pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
                                        start_vpfn, last_vpfn);
                                return NOTIFY_BAD;
                        }
@@ -4168,13 +4486,6 @@ int __init intel_iommu_init(void)
                goto out_free_dmar;
        }
 
-       /*
-        * Disable translation if already enabled prior to OS handover.
-        */
-       for_each_active_iommu(iommu, drhd)
-               if (iommu->gcmd & DMA_GCMD_TE)
-                       iommu_disable_translation(iommu);
-
        if (dmar_dev_scope_init() < 0) {
                if (force_on)
                        panic("tboot: Failed to initialize DMAR device scope\n");
@@ -4185,10 +4496,10 @@ int __init intel_iommu_init(void)
                goto out_free_dmar;
 
        if (list_empty(&dmar_rmrr_units))
-               printk(KERN_INFO "DMAR: No RMRR found\n");
+               pr_info("No RMRR found\n");
 
        if (list_empty(&dmar_atsr_units))
-               printk(KERN_INFO "DMAR: No ATSR found\n");
+               pr_info("No ATSR found\n");
 
        if (dmar_init_reserved_ranges()) {
                if (force_on)
@@ -4202,12 +4513,11 @@ int __init intel_iommu_init(void)
        if (ret) {
                if (force_on)
                        panic("tboot: Failed to initialize DMARs\n");
-               printk(KERN_ERR "IOMMU: dmar init failed\n");
+               pr_err("Initialization failed\n");
                goto out_free_reserved_range;
        }
        up_write(&dmar_global_lock);
-       printk(KERN_INFO
-       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
+       pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
 
        init_timer(&unmap_timer);
 #ifdef CONFIG_SWIOTLB
@@ -4349,13 +4659,11 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 
        dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
        if (!dmar_domain) {
-               printk(KERN_ERR
-                       "intel_iommu_domain_init: dmar_domain == NULL\n");
+               pr_err("Can't allocate dmar_domain\n");
                return NULL;
        }
        if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
-               printk(KERN_ERR
-                       "intel_iommu_domain_init() failed\n");
+               pr_err("Domain initialization failed\n");
                domain_exit(dmar_domain);
                return NULL;
        }
@@ -4414,7 +4722,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
                addr_width = cap_mgaw(iommu->cap);
 
        if (dmar_domain->max_addr > (1LL << addr_width)) {
-               printk(KERN_ERR "%s: iommu width (%d) is not "
+               pr_err("%s: iommu width (%d) is not "
                       "sufficient for the mapped address (%llx)\n",
                       __func__, addr_width, dmar_domain->max_addr);
                return -EFAULT;
@@ -4468,7 +4776,7 @@ static int intel_iommu_map(struct iommu_domain *domain,
                /* check if minimum agaw is sufficient for mapped address */
                end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
                if (end < max_addr) {
-                       printk(KERN_ERR "%s: iommu width (%d) is not "
+                       pr_err("%s: iommu width (%d) is not "
                               "sufficient for the mapped address (%llx)\n",
                               __func__, dmar_domain->gaw, max_addr);
                        return -EFAULT;
@@ -4609,7 +4917,7 @@ static const struct iommu_ops intel_iommu_ops = {
 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
 {
        /* G4x/GM45 integrated gfx dmar support is totally busted. */
-       printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
+       pr_info("Disabling IOMMU for graphics on this chipset\n");
        dmar_map_gfx = 0;
 }
 
@@ -4627,7 +4935,7 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)
         * Mobile 4 Series Chipset neglects to set RWBF capability,
         * but needs it. Same seems to hold for the desktop versions.
         */
-       printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+       pr_info("Forcing write-buffer flush capability\n");
        rwbf_quirk = 1;
 }
 
@@ -4657,11 +4965,11 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
                return;
 
        if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
-               printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+               pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
                dmar_map_gfx = 0;
        } else if (dmar_map_gfx) {
                /* we have to ensure the gfx device is idle before we flush */
-               printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
+               pr_info("Disabling batched IOTLB flush on Ironlake\n");
                intel_iommu_strict = 1;
        }
 }
@@ -4723,7 +5031,7 @@ static void __init check_tylersburg_isoch(void)
                iommu_identity_mapping |= IDENTMAP_AZALIA;
                return;
        }
-       
-       printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
+
+       pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
               vtisochctrl);
 }
index 5709ae9c3e771d2f82a1bda2a23d500d8f4faffe..47fcebf39e9e26717de9824734c4b15d3acc4f2e 100644 (file)
@@ -1,3 +1,6 @@
+
+#define pr_fmt(fmt)     "DMAR-IR: " fmt
+
 #include <linux/interrupt.h>
 #include <linux/dmar.h>
 #include <linux/spinlock.h>
@@ -8,6 +11,7 @@
 #include <linux/irq.h>
 #include <linux/intel-iommu.h>
 #include <linux/acpi.h>
+#include <linux/crash_dump.h>
 #include <asm/io_apic.h>
 #include <asm/smp.h>
 #include <asm/cpu.h>
@@ -51,8 +55,28 @@ static struct hpet_scope ir_hpet[MAX_HPET_TBS];
  */
 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
 
+static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
 static int __init parse_ioapics_under_ir(void);
 
+static bool ir_pre_enabled(struct intel_iommu *iommu)
+{
+       return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
+}
+
+static void clear_ir_pre_enabled(struct intel_iommu *iommu)
+{
+       iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
+}
+
+static void init_ir_status(struct intel_iommu *iommu)
+{
+       u32 gsts;
+
+       gsts = readl(iommu->reg + DMAR_GSTS_REG);
+       if (gsts & DMA_GSTS_IRES)
+               iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
+}
+
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
        struct irq_cfg *cfg = irq_cfg(irq);
@@ -100,8 +124,7 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
        }
 
        if (mask > ecap_max_handle_mask(iommu->ecap)) {
-               printk(KERN_ERR
-                      "Requested mask %x exceeds the max invalidation handle"
+               pr_err("Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
@@ -333,7 +356,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
        up_read(&dmar_global_lock);
 
        if (sid == 0) {
-               pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
+               pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
                return -1;
        }
 
@@ -360,7 +383,7 @@ static int set_hpet_sid(struct irte *irte, u8 id)
        up_read(&dmar_global_lock);
 
        if (sid == 0) {
-               pr_warning("Failed to set source-id of HPET block (%d)\n", id);
+               pr_warn("Failed to set source-id of HPET block (%d)\n", id);
                return -1;
        }
 
@@ -424,11 +447,59 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
        return 0;
 }
 
+static int iommu_load_old_irte(struct intel_iommu *iommu)
+{
+       struct irte *old_ir_table;
+       phys_addr_t irt_phys;
+       unsigned int i;
+       size_t size;
+       u64 irta;
+
+       if (!is_kdump_kernel()) {
+               pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
+                       iommu->name);
+               clear_ir_pre_enabled(iommu);
+               iommu_disable_irq_remapping(iommu);
+               return -EINVAL;
+       }
+
+       /* Check whether the old ir-table has the same size as ours */
+       irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
+       if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
+            != INTR_REMAP_TABLE_REG_SIZE)
+               return -EINVAL;
+
+       irt_phys = irta & VTD_PAGE_MASK;
+       size     = INTR_REMAP_TABLE_ENTRIES*sizeof(struct irte);
+
+       /* Map the old IR table */
+       old_ir_table = ioremap_cache(irt_phys, size);
+       if (!old_ir_table)
+               return -ENOMEM;
+
+       /* Copy data over */
+       memcpy(iommu->ir_table->base, old_ir_table, size);
+
+       __iommu_flush_cache(iommu, iommu->ir_table->base, size);
+
+       /*
+        * Now check the table for used entries and mark those as
+        * allocated in the bitmap
+        */
+       for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
+               if (iommu->ir_table->base[i].present)
+                       bitmap_set(iommu->ir_table->bitmap, i, 1);
+       }
+
+       return 0;
+}
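
In effect, the bitmap_set() pass above reserves every inherited IRTE
slot, so alloc_irte() in the new kernel only hands out entries that no
old-kernel interrupt source still references.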
+
 static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
 {
+       unsigned long flags;
        u64 addr;
        u32 sts;
-       unsigned long flags;
 
        addr = virt_to_phys((void *)iommu->ir_table->base);
 
@@ -445,10 +516,16 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 
        /*
-        * global invalidation of interrupt entry cache before enabling
-        * interrupt-remapping.
+        * Global invalidation of interrupt entry cache to make sure the
+        * hardware uses the new irq remapping table.
         */
        qi_global_iec(iommu);
+}
+
+static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
+{
+       unsigned long flags;
+       u32 sts;
 
        raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
@@ -505,12 +582,48 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
        ir_table->base = page_address(pages);
        ir_table->bitmap = bitmap;
        iommu->ir_table = ir_table;
+
+       /*
+        * If the queued invalidation is already initialized,
+        * shouldn't disable it.
+        */
+       if (!iommu->qi) {
+               /*
+                * Clear previous faults.
+                */
+               dmar_fault(-1, iommu);
+               dmar_disable_qi(iommu);
+
+               if (dmar_enable_qi(iommu)) {
+                       pr_err("Failed to enable queued invalidation\n");
+                       goto out_free_bitmap;
+               }
+       }
+
+       init_ir_status(iommu);
+
+       if (ir_pre_enabled(iommu)) {
+               if (iommu_load_old_irte(iommu))
+                       pr_err("Failed to copy IR table for %s from previous kernel\n",
+                              iommu->name);
+               else
+                       pr_info("Copied IR table for %s from previous kernel\n",
+                               iommu->name);
+       }
+
+       iommu_set_irq_remapping(iommu, eim_mode);
+
        return 0;
 
+out_free_bitmap:
+       kfree(bitmap);
 out_free_pages:
        __free_pages(pages, INTR_REMAP_PAGE_ORDER);
 out_free_table:
        kfree(ir_table);
+
+       iommu->ir_table = NULL;
+
        return -ENOMEM;
 }
 
@@ -580,17 +693,17 @@ static void __init intel_cleanup_irq_remapping(void)
        }
 
        if (x2apic_supported())
-               pr_warn("Failed to enable irq remapping.  You are vulnerable to irq-injection attacks.\n");
+               pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
 }
 
 static int __init intel_prepare_irq_remapping(void)
 {
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
+       int eim = 0;
 
        if (irq_remap_broken) {
-               printk(KERN_WARNING
-                       "This system BIOS has enabled interrupt remapping\n"
+               pr_warn("This system BIOS has enabled interrupt remapping\n"
                        "on a chipset that contains an erratum making that\n"
                        "feature unstable.  To maintain system stability\n"
                        "interrupt remapping is being disabled.  Please\n"
@@ -606,7 +719,7 @@ static int __init intel_prepare_irq_remapping(void)
                return -ENODEV;
 
        if (parse_ioapics_under_ir() != 1) {
-               printk(KERN_INFO "Not enabling interrupt remapping\n");
+               pr_info("Not enabling interrupt remapping\n");
                goto error;
        }
 
@@ -615,85 +728,54 @@ static int __init intel_prepare_irq_remapping(void)
                if (!ecap_ir_support(iommu->ecap))
                        goto error;
 
-       /* Do the allocations early */
-       for_each_iommu(iommu, drhd)
-               if (intel_setup_irq_remapping(iommu))
-                       goto error;
-
-       return 0;
-
-error:
-       intel_cleanup_irq_remapping();
-       return -ENODEV;
-}
-
-static int __init intel_enable_irq_remapping(void)
-{
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
-       bool setup = false;
-       int eim = 0;
-
+       /* Detect remapping mode: lapic or x2apic */
        if (x2apic_supported()) {
                eim = !dmar_x2apic_optout();
-               if (!eim)
-                       pr_info("x2apic is disabled because BIOS sets x2apic opt out bit. You can use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
+               if (!eim) {
+                       pr_info("x2apic is disabled because BIOS sets x2apic opt out bit.");
+                       pr_info("Use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
+               }
        }
 
        for_each_iommu(iommu, drhd) {
-               /*
-                * If the queued invalidation is already initialized,
-                * shouldn't disable it.
-                */
-               if (iommu->qi)
-                       continue;
-
-               /*
-                * Clear previous faults.
-                */
-               dmar_fault(-1, iommu);
-
-               /*
-                * Disable intr remapping and queued invalidation, if already
-                * enabled prior to OS handover.
-                */
-               iommu_disable_irq_remapping(iommu);
-
-               dmar_disable_qi(iommu);
-       }
-
-       /*
-        * check for the Interrupt-remapping support
-        */
-       for_each_iommu(iommu, drhd)
                if (eim && !ecap_eim_support(iommu->ecap)) {
-                       printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
-                              " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
+                       pr_info("%s does not support EIM\n", iommu->name);
                        eim = 0;
                }
+       }
+
        eim_mode = eim;
        if (eim)
                pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
 
-       /*
-        * Enable queued invalidation for all the DRHD's.
-        */
+       /* Do the initializations early */
        for_each_iommu(iommu, drhd) {
-               int ret = dmar_enable_qi(iommu);
-
-               if (ret) {
-                       printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
-                              " invalidation, ecap %Lx, ret %d\n",
-                              drhd->reg_base_addr, iommu->ecap, ret);
+               if (intel_setup_irq_remapping(iommu)) {
+                       pr_err("Failed to setup irq remapping for %s\n",
+                              iommu->name);
                        goto error;
                }
        }
 
+       return 0;
+
+error:
+       intel_cleanup_irq_remapping();
+       return -ENODEV;
+}
+
+static int __init intel_enable_irq_remapping(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       bool setup = false;
+
        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
-               iommu_set_irq_remapping(iommu, eim);
+               if (!ir_pre_enabled(iommu))
+                       iommu_enable_irq_remapping(iommu);
                setup = true;
        }
 
@@ -709,9 +791,9 @@ static int __init intel_enable_irq_remapping(void)
         */
        x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;
 
-       pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");
+       pr_info("Enabled IRQ remapping in %s mode\n", eim_mode ? "x2apic" : "xapic");
 
-       return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
+       return eim_mode ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
 
 error:
        intel_cleanup_irq_remapping();
@@ -930,6 +1012,7 @@ static int reenable_irq_remapping(int eim)
 
                /* Set up interrupt remapping for iommu.*/
                iommu_set_irq_remapping(iommu, eim);
+               iommu_enable_irq_remapping(iommu);
                setup = true;
        }
 
@@ -1145,14 +1228,12 @@ static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
        down_read(&dmar_global_lock);
        iommu = map_dev_to_ir(dev);
        if (!iommu) {
-               printk(KERN_ERR
-                      "Unable to map PCI %s to iommu\n", pci_name(dev));
+               pr_err("Unable to map PCI %s to iommu\n", pci_name(dev));
                index = -ENOENT;
        } else {
                index = alloc_irte(iommu, irq, nvec);
                if (index < 0) {
-                       printk(KERN_ERR
-                              "Unable to allocate %d IRTE for PCI %s\n",
+                       pr_err("Unable to allocate %d IRTE for PCI %s\n",
                               nvec, pci_name(dev));
                        index = -ENOSPC;
                }
@@ -1242,28 +1323,12 @@ static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
        /* Setup Interrupt-remapping now. */
        ret = intel_setup_irq_remapping(iommu);
        if (ret) {
-               pr_err("DRHD %Lx: failed to allocate resource\n",
-                      iommu->reg_phys);
-               ir_remove_ioapic_hpet_scope(iommu);
-               return ret;
-       }
-
-       if (!iommu->qi) {
-               /* Clear previous faults. */
-               dmar_fault(-1, iommu);
-               iommu_disable_irq_remapping(iommu);
-               dmar_disable_qi(iommu);
-       }
-
-       /* Enable queued invalidation */
-       ret = dmar_enable_qi(iommu);
-       if (!ret) {
-               iommu_set_irq_remapping(iommu, eim);
-       } else {
-               pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
-                      iommu->reg_phys, iommu->ecap, ret);
+               pr_err("Failed to setup irq remapping for %s\n",
+                      iommu->name);
                intel_teardown_irq_remapping(iommu);
                ir_remove_ioapic_hpet_scope(iommu);
+       } else {
+               iommu_enable_irq_remapping(iommu);
        }
 
        return ret;
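
The hunks above split interrupt-remapping bring-up into two phases: intel_setup_irq_remapping() allocates the IRTE table, enables queued invalidation, and — when ir_pre_enabled() says a kdump kernel inherited a live unit — copies the previous kernel's table; iommu_enable_irq_remapping() only flips the hardware on afterwards, and is skipped for pre-enabled units. A minimal userspace sketch of that two-phase pattern; struct ir_unit and every helper name here are hypothetical:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical model of one remapping unit. */
    struct ir_unit {
        int  *table;        /* stands in for the IRTE page */
        bool  pre_enabled;  /* left enabled by the previous kernel */
        bool  enabled;
    };

    /* Phase 1: allocate resources; if the unit was left enabled by a
     * previous kernel, copy its live table instead of starting empty. */
    static int ir_setup(struct ir_unit *u, const int *old_table, size_t n)
    {
        u->table = calloc(n, sizeof(*u->table));
        if (!u->table)
            return -1;
        if (u->pre_enabled && old_table)
            memcpy(u->table, old_table, n * sizeof(*u->table));
        return 0;
    }

    /* Phase 2: point the hardware at the new table.  Skipped for
     * pre-enabled units until the copied table is known to be good. */
    static void ir_enable(struct ir_unit *u)
    {
        u->enabled = true;
    }

    int main(void)
    {
        int old[4] = { 1, 2, 3, 4 };
        struct ir_unit u = { .pre_enabled = true };

        if (ir_setup(&u, old, 4))
            return 1;
        if (!u.pre_enabled)
            ir_enable(&u);  /* mirrors intel_enable_irq_remapping() */
        printf("entry0=%d enabled=%d\n", u.table[0], u.enabled);
        free(u.table);
        return 0;
    }
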
index d4f527e5667936454bfb49e01db931703b180e73..49e7542510d15caac5622cdb01fdcf8b77bb80e8 100644 (file)
@@ -16,7 +16,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
-#define pr_fmt(fmt)    "%s: " fmt, __func__
+#define pr_fmt(fmt)    "iommu: " fmt
 
 #include <linux/device.h>
 #include <linux/kernel.h>
@@ -51,6 +51,8 @@ struct iommu_group {
        void (*iommu_data_release)(void *iommu_data);
        char *name;
        int id;
+       struct iommu_domain *default_domain;
+       struct iommu_domain *domain;
 };
 
 struct iommu_device {
@@ -75,6 +77,15 @@ struct iommu_group_attribute iommu_group_attr_##_name =              \
 #define to_iommu_group(_kobj)          \
        container_of(_kobj, struct iommu_group, kobj)
 
+static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
+                                                unsigned type);
+static int __iommu_attach_device(struct iommu_domain *domain,
+                                struct device *dev);
+static int __iommu_attach_group(struct iommu_domain *domain,
+                               struct iommu_group *group);
+static void __iommu_detach_group(struct iommu_domain *domain,
+                                struct iommu_group *group);
+
 static ssize_t iommu_group_attr_show(struct kobject *kobj,
                                     struct attribute *__attr, char *buf)
 {
@@ -128,6 +139,8 @@ static void iommu_group_release(struct kobject *kobj)
 {
        struct iommu_group *group = to_iommu_group(kobj);
 
+       pr_debug("Releasing group %d\n", group->id);
+
        if (group->iommu_data_release)
                group->iommu_data_release(group->iommu_data);
 
@@ -135,6 +148,9 @@ static void iommu_group_release(struct kobject *kobj)
        ida_remove(&iommu_group_ida, group->id);
        mutex_unlock(&iommu_group_mutex);
 
+       if (group->default_domain)
+               iommu_domain_free(group->default_domain);
+
        kfree(group->name);
        kfree(group);
 }
@@ -207,6 +223,8 @@ again:
         */
        kobject_put(&group->kobj);
 
+       pr_debug("Allocated group %d\n", group->id);
+
        return group;
 }
 EXPORT_SYMBOL_GPL(iommu_group_alloc);
@@ -307,6 +325,52 @@ int iommu_group_set_name(struct iommu_group *group, const char *name)
 }
 EXPORT_SYMBOL_GPL(iommu_group_set_name);
 
+static int iommu_group_create_direct_mappings(struct iommu_group *group,
+                                             struct device *dev)
+{
+       struct iommu_domain *domain = group->default_domain;
+       struct iommu_dm_region *entry;
+       struct list_head mappings;
+       unsigned long pg_size;
+       int ret = 0;
+
+       if (!domain || domain->type != IOMMU_DOMAIN_DMA)
+               return 0;
+
+       BUG_ON(!domain->ops->pgsize_bitmap);
+
+       pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap);
+       INIT_LIST_HEAD(&mappings);
+
+       iommu_get_dm_regions(dev, &mappings);
+
+       /* We need to consider overlapping regions for different devices */
+       list_for_each_entry(entry, &mappings, list) {
+               dma_addr_t start, end, addr;
+
+               start = ALIGN(entry->start, pg_size);
+               end   = ALIGN(entry->start + entry->length, pg_size);
+
+               for (addr = start; addr < end; addr += pg_size) {
+                       phys_addr_t phys_addr;
+
+                       phys_addr = iommu_iova_to_phys(domain, addr);
+                       if (phys_addr)
+                               continue;
+
+                       ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
+                       if (ret)
+                               goto out;
+               }
+       }
+
+out:
+       iommu_put_dm_regions(dev, &mappings);
+
+       return ret;
+}
+
 /**
  * iommu_group_add_device - add a device to an iommu group
  * @group: the group into which to add the device (reference should be held)
@@ -363,8 +427,12 @@ rename:
 
        dev->iommu_group = group;
 
+       iommu_group_create_direct_mappings(group, dev);
+
        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
+       if (group->domain)
+               __iommu_attach_device(group->domain, dev);
        mutex_unlock(&group->mutex);
 
        /* Notify any listeners about change to group. */
@@ -372,6 +440,9 @@ rename:
                                     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
 
        trace_add_device_to_group(group->id, dev);
+
+       pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -388,6 +459,8 @@ void iommu_group_remove_device(struct device *dev)
        struct iommu_group *group = dev->iommu_group;
        struct iommu_device *tmp_device, *device = NULL;
 
+       pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);
+
        /* Pre-notify listeners that a device is being removed. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
@@ -417,6 +490,17 @@ void iommu_group_remove_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iommu_group_remove_device);
 
+static int iommu_group_device_count(struct iommu_group *group)
+{
+       struct iommu_device *entry;
+       int ret = 0;
+
+       list_for_each_entry(entry, &group->devices, list)
+               ret++;
+
+       return ret;
+}
+
 /**
  * iommu_group_for_each_dev - iterate over each device in the group
  * @group: the group
@@ -428,19 +512,30 @@ EXPORT_SYMBOL_GPL(iommu_group_remove_device);
  * The group->mutex is held across callbacks, which will block calls to
  * iommu_group_add/remove_device.
  */
-int iommu_group_for_each_dev(struct iommu_group *group, void *data,
-                            int (*fn)(struct device *, void *))
+static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
+                                     int (*fn)(struct device *, void *))
 {
        struct iommu_device *device;
        int ret = 0;
 
-       mutex_lock(&group->mutex);
        list_for_each_entry(device, &group->devices, list) {
                ret = fn(device->dev, data);
                if (ret)
                        break;
        }
+       return ret;
+}
+
+int iommu_group_for_each_dev(struct iommu_group *group, void *data,
+                            int (*fn)(struct device *, void *))
+{
+       int ret;
+
+       mutex_lock(&group->mutex);
+       ret = __iommu_group_for_each_dev(group, data, fn);
        mutex_unlock(&group->mutex);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
@@ -692,7 +787,19 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
                return group;
 
        /* No shared group found, allocate new */
-       return iommu_group_alloc();
+       group = iommu_group_alloc();
+       if (IS_ERR(group))
+               return NULL;
+
+       /*
+        * Try to allocate a default domain - needs support from the
+        * IOMMU driver.
+        */
+       group->default_domain = __iommu_domain_alloc(pdev->dev.bus,
+                                                    IOMMU_DOMAIN_DMA);
+       group->domain = group->default_domain;
+
+       return group;
 }
 
 /**
@@ -731,6 +838,11 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
        return group;
 }
 
+struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
+{
+       return group->default_domain;
+}
+
 static int add_iommu_group(struct device *dev, void *data)
 {
        struct iommu_callback_data *cb = data;
@@ -741,7 +853,16 @@ static int add_iommu_group(struct device *dev, void *data)
 
        WARN_ON(dev->iommu_group);
 
-       ops->add_device(dev);
+       return ops->add_device(dev);
+}
+
+static int remove_iommu_group(struct device *dev, void *data)
+{
+       struct iommu_callback_data *cb = data;
+       const struct iommu_ops *ops = cb->ops;
+
+       if (ops->remove_device && dev->iommu_group)
+               ops->remove_device(dev);
 
        return 0;
 }
@@ -761,7 +882,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
        if (action == BUS_NOTIFY_ADD_DEVICE) {
                if (ops->add_device)
                        return ops->add_device(dev);
-       } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+       } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
                if (ops->remove_device && dev->iommu_group) {
                        ops->remove_device(dev);
                        return 0;
@@ -814,19 +935,25 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
        nb->notifier_call = iommu_bus_notifier;
 
        err = bus_register_notifier(bus, nb);
-       if (err) {
-               kfree(nb);
-               return err;
-       }
+       if (err)
+               goto out_free;
 
        err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
-       if (err) {
-               bus_unregister_notifier(bus, nb);
-               kfree(nb);
-               return err;
-       }
+       if (err)
+               goto out_err;
 
        return 0;
+
+out_err:
+       /* Clean up */
+       bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
+       bus_unregister_notifier(bus, nb);
+
+out_free:
+       kfree(nb);
+
+       return err;
 }
 
 /**
@@ -898,22 +1025,28 @@ void iommu_set_fault_handler(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
 
-struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
+static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
+                                                unsigned type)
 {
        struct iommu_domain *domain;
 
        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;
 
-       domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
+       domain = bus->iommu_ops->domain_alloc(type);
        if (!domain)
                return NULL;
 
        domain->ops  = bus->iommu_ops;
-       domain->type = IOMMU_DOMAIN_UNMANAGED;
+       domain->type = type;
 
        return domain;
 }
+
+struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
+{
+       return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
+}
 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
 
 void iommu_domain_free(struct iommu_domain *domain)
@@ -922,7 +1055,8 @@ void iommu_domain_free(struct iommu_domain *domain)
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
 
-int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
+static int __iommu_attach_device(struct iommu_domain *domain,
+                                struct device *dev)
 {
        int ret;
        if (unlikely(domain->ops->attach_dev == NULL))
@@ -933,9 +1067,38 @@ int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
                trace_attach_device_to_domain(dev);
        return ret;
 }
+
+int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
+{
+       struct iommu_group *group;
+       int ret;
+
+       group = iommu_group_get(dev);
+       /* FIXME: Remove this when groups are mandatory for iommu drivers */
+       if (group == NULL)
+               return __iommu_attach_device(domain, dev);
+
+       /*
+        * We have a group - lock it to make sure the device-count doesn't
+        * change while we are attaching
+        */
+       mutex_lock(&group->mutex);
+       ret = -EINVAL;
+       if (iommu_group_device_count(group) != 1)
+               goto out_unlock;
+
+       ret = __iommu_attach_group(domain, group);
+
+out_unlock:
+       mutex_unlock(&group->mutex);
+       iommu_group_put(group);
+
+       return ret;
+}
 EXPORT_SYMBOL_GPL(iommu_attach_device);
 
-void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
+static void __iommu_detach_device(struct iommu_domain *domain,
+                                 struct device *dev)
 {
        if (unlikely(domain->ops->detach_dev == NULL))
                return;
@@ -943,8 +1106,48 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
        domain->ops->detach_dev(domain, dev);
        trace_detach_device_from_domain(dev);
 }
+
+void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
+{
+       struct iommu_group *group;
+
+       group = iommu_group_get(dev);
+       /* FIXME: Remove this when groups are mandatory for iommu drivers */
+       if (group == NULL)
+               return __iommu_detach_device(domain, dev);
+
+       mutex_lock(&group->mutex);
+       if (iommu_group_device_count(group) != 1) {
+               WARN_ON(1);
+               goto out_unlock;
+       }
+
+       __iommu_detach_group(domain, group);
+
+out_unlock:
+       mutex_unlock(&group->mutex);
+       iommu_group_put(group);
+}
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
+struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
+{
+       struct iommu_domain *domain;
+       struct iommu_group *group;
+
+       group = iommu_group_get(dev);
+       /* FIXME: Remove this when groups are mandatory for iommu drivers */
+       if (group == NULL)
+               return NULL;
+
+       domain = group->domain;
+
+       iommu_group_put(group);
+
+       return domain;
+}
+EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
+
 /*
  * IOMMU groups are really the natural working unit of the IOMMU, but
  * the IOMMU API works on domains and devices.  Bridge that gap by
@@ -959,13 +1162,34 @@ static int iommu_group_do_attach_device(struct device *dev, void *data)
 {
        struct iommu_domain *domain = data;
 
-       return iommu_attach_device(domain, dev);
+       return __iommu_attach_device(domain, dev);
+}
+
+static int __iommu_attach_group(struct iommu_domain *domain,
+                               struct iommu_group *group)
+{
+       int ret;
+
+       if (group->default_domain && group->domain != group->default_domain)
+               return -EBUSY;
+
+       ret = __iommu_group_for_each_dev(group, domain,
+                                        iommu_group_do_attach_device);
+       if (ret == 0)
+               group->domain = domain;
+
+       return ret;
 }
 
 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
 {
-       return iommu_group_for_each_dev(group, domain,
-                                       iommu_group_do_attach_device);
+       int ret;
+
+       mutex_lock(&group->mutex);
+       ret = __iommu_attach_group(domain, group);
+       mutex_unlock(&group->mutex);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_attach_group);
 
@@ -973,14 +1197,40 @@ static int iommu_group_do_detach_device(struct device *dev, void *data)
 {
        struct iommu_domain *domain = data;
 
-       iommu_detach_device(domain, dev);
+       __iommu_detach_device(domain, dev);
 
        return 0;
 }
 
+static void __iommu_detach_group(struct iommu_domain *domain,
+                                struct iommu_group *group)
+{
+       int ret;
+
+       if (!group->default_domain) {
+               __iommu_group_for_each_dev(group, domain,
+                                          iommu_group_do_detach_device);
+               group->domain = NULL;
+               return;
+       }
+
+       if (group->domain == group->default_domain)
+               return;
+
+       /* Detach by re-attaching to the default domain */
+       ret = __iommu_group_for_each_dev(group, group->default_domain,
+                                        iommu_group_do_attach_device);
+       if (ret != 0)
+               WARN_ON(1);
+       else
+               group->domain = group->default_domain;
+}
+
 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
 {
-       iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
+       mutex_lock(&group->mutex);
+       __iommu_detach_group(domain, group);
+       mutex_unlock(&group->mutex);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_group);
 
@@ -1207,7 +1457,7 @@ static int __init iommu_init(void)
 
        return 0;
 }
-arch_initcall(iommu_init);
+core_initcall(iommu_init);
 
 int iommu_domain_get_attr(struct iommu_domain *domain,
                          enum iommu_attr attr, void *data)
@@ -1273,3 +1523,72 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
        return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
+
+void iommu_get_dm_regions(struct device *dev, struct list_head *list)
+{
+       const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+       if (ops && ops->get_dm_regions)
+               ops->get_dm_regions(dev, list);
+}
+
+void iommu_put_dm_regions(struct device *dev, struct list_head *list)
+{
+       const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+       if (ops && ops->put_dm_regions)
+               ops->put_dm_regions(dev, list);
+}
+
+/* Request that a device is direct mapped by the IOMMU */
+int iommu_request_dm_for_dev(struct device *dev)
+{
+       struct iommu_domain *dm_domain;
+       struct iommu_group *group;
+       int ret;
+
+       /* Device must already be in a group before calling this function */
+       group = iommu_group_get_for_dev(dev);
+       if (IS_ERR(group))
+               return PTR_ERR(group);
+
+       mutex_lock(&group->mutex);
+
+       /* Check if the default domain is already direct mapped */
+       ret = 0;
+       if (group->default_domain &&
+           group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
+               goto out;
+
+       /* Don't change mappings of existing devices */
+       ret = -EBUSY;
+       if (iommu_group_device_count(group) != 1)
+               goto out;
+
+       /* Allocate a direct mapped domain */
+       ret = -ENOMEM;
+       dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
+       if (!dm_domain)
+               goto out;
+
+       /* Attach the device to the domain */
+       ret = __iommu_attach_group(dm_domain, group);
+       if (ret) {
+               iommu_domain_free(dm_domain);
+               goto out;
+       }
+
+       /* Make the direct mapped domain the default for this group */
+       if (group->default_domain)
+               iommu_domain_free(group->default_domain);
+       group->default_domain = dm_domain;
+
+       pr_info("Using direct mapping for device %s\n", dev_name(dev));
+
+       ret = 0;
+out:
+       mutex_unlock(&group->mutex);
+       iommu_group_put(group);
+
+       return ret;
+}
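
Central to the iommu.c rework above: every group gains a default_domain plus a pointer to the domain it is currently attached to, __iommu_attach_group() refuses a group that has already left its default domain, and __iommu_detach_group() "detaches" by re-attaching the group to the default domain. A compact userspace model of that state machine, with all names hypothetical:

    #include <assert.h>
    #include <stddef.h>

    struct domain { int id; };

    struct group {
        struct domain *default_domain;
        struct domain *domain;          /* currently attached */
    };

    static int attach_group(struct group *g, struct domain *d)
    {
        /* Refuse if the group already left its default domain. */
        if (g->default_domain && g->domain != g->default_domain)
            return -1;                  /* -EBUSY in the kernel */
        g->domain = d;
        return 0;
    }

    static void detach_group(struct group *g)
    {
        /* "Detach" means: fall back to the default domain. */
        if (g->default_domain)
            g->domain = g->default_domain;
        else
            g->domain = NULL;
    }

    int main(void)
    {
        struct domain dflt = { 0 }, vfio = { 1 };
        struct group g = { .default_domain = &dflt, .domain = &dflt };

        assert(attach_group(&g, &vfio) == 0);   /* e.g. VFIO takes over */
        assert(attach_group(&g, &vfio) == -1);  /* second attach refused */
        detach_group(&g);
        assert(g.domain == &dflt);      /* back on the default domain */
        return 0;
    }
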
index 9dd8208312c2e75874ce70dcc6008a166eb5ffca..b7c3d923f3e1c0569c42492d435b7c4d9a321caa 100644 (file)
@@ -227,6 +227,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
        /* Figure out where to put new node */
        while (*new) {
                struct iova *this = container_of(*new, struct iova, node);
+
                parent = *new;
 
                if (iova->pfn_lo < this->pfn_lo)
@@ -350,6 +351,7 @@ void
 free_iova(struct iova_domain *iovad, unsigned long pfn)
 {
        struct iova *iova = find_iova(iovad, pfn);
+
        if (iova)
                __free_iova(iovad, iova);
 
@@ -369,6 +371,7 @@ void put_iova_domain(struct iova_domain *iovad)
        node = rb_first(&iovad->rbroot);
        while (node) {
                struct iova *iova = container_of(node, struct iova, node);
+
                rb_erase(node, &iovad->rbroot);
                free_iova_mem(iova);
                node = rb_first(&iovad->rbroot);
@@ -482,6 +485,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
        for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
                struct iova *iova = container_of(node, struct iova, node);
                struct iova *new_iova;
+
                new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
                if (!new_iova)
                        printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
index cab214544237cf6f89754c3878f0e57f66ba360d..ebf0adb8e7ea729f5cab436de29a8e18afd8d78c 100644 (file)
@@ -551,6 +551,15 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
 }
 
+static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
+                                        dma_addr_t iova, size_t size)
+{
+       rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
+       if (size > SPAGE_SIZE)
+               rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
+                                       SPAGE_SIZE);
+}
+
 static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
                                  dma_addr_t iova)
 {
@@ -575,12 +584,6 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
        rk_table_flush(page_table, NUM_PT_ENTRIES);
        rk_table_flush(dte_addr, 1);
 
-       /*
-        * Zap the first iova of newly allocated page table so iommu evicts
-        * old cached value of new dte from the iotlb.
-        */
-       rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
-
 done:
        pt_phys = rk_dte_pt_address(dte);
        return (u32 *)phys_to_virt(pt_phys);
@@ -630,6 +633,14 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
 
        rk_table_flush(pte_addr, pte_count);
 
+       /*
+        * Zap the first and last iova to evict from iotlb any previously
+        * mapped cachelines holding stale values for its dte and pte.
+        * We only zap the first and last iova, since only they could have
+        * dte or pte shared with an existing mapping.
+        */
+       rk_iommu_zap_iova_first_last(rk_domain, iova, size);
+
        return 0;
 unwind:
        /* Unmap the range of iovas that we just mapped */
@@ -774,7 +785,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
        list_add_tail(&iommu->node, &rk_domain->iommus);
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
 
-       dev_info(dev, "Attached to iommu domain\n");
+       dev_dbg(dev, "Attached to iommu domain\n");
 
        rk_iommu_disable_stall(iommu);
 
@@ -808,7 +819,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 
        iommu->domain = NULL;
 
-       dev_info(dev, "Detached from iommu domain\n");
+       dev_dbg(dev, "Detached from iommu domain\n");
 }
 
 static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
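
rk_iommu_zap_iova_first_last() relies on the observation spelled out in the new comment: only the first and last pages of a fresh mapping can share a dte or pte cacheline with an existing mapping, so only those two IOVAs need their stale IOTLB entries evicted. A tiny runnable sketch of that arithmetic, assuming the driver's 4 KiB small-page size:

    #include <stdint.h>
    #include <stdio.h>

    #define SPAGE_SIZE 4096u

    /* Print the IOVAs that would be zapped for a new mapping. */
    static void zap_first_last(uint64_t iova, uint64_t size)
    {
        printf("zap %#llx\n", (unsigned long long)iova);
        if (size > SPAGE_SIZE)
            printf("zap %#llx\n",
                   (unsigned long long)(iova + size - SPAGE_SIZE));
    }

    int main(void)
    {
        zap_first_last(0x10000, 3 * SPAGE_SIZE); /* zaps 0x10000, 0x12000 */
        return 0;
    }
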
index 57f09cb544644bcd97aa81bfc044c2686b350bbb..269c2354c43169307aa02438dbf38aa4b54f0dad 100644 (file)
@@ -271,7 +271,7 @@ int gic_get_c0_fdc_int(void)
                                  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
 }
 
-static void gic_handle_shared_int(void)
+static void gic_handle_shared_int(bool chained)
 {
        unsigned int i, intr, virq;
        unsigned long *pcpu_mask;
@@ -299,7 +299,10 @@ static void gic_handle_shared_int(void)
        while (intr != gic_shared_intrs) {
                virq = irq_linear_revmap(gic_irq_domain,
                                         GIC_SHARED_TO_HWIRQ(intr));
-               do_IRQ(virq);
+               if (chained)
+                       generic_handle_irq(virq);
+               else
+                       do_IRQ(virq);
 
                /* go to next pending bit */
                bitmap_clear(pending, intr, 1);
@@ -431,7 +434,7 @@ static struct irq_chip gic_edge_irq_controller = {
 #endif
 };
 
-static void gic_handle_local_int(void)
+static void gic_handle_local_int(bool chained)
 {
        unsigned long pending, masked;
        unsigned int intr, virq;
@@ -445,7 +448,10 @@ static void gic_handle_local_int(void)
        while (intr != GIC_NUM_LOCAL_INTRS) {
                virq = irq_linear_revmap(gic_irq_domain,
                                         GIC_LOCAL_TO_HWIRQ(intr));
-               do_IRQ(virq);
+               if (chained)
+                       generic_handle_irq(virq);
+               else
+                       do_IRQ(virq);
 
                /* go to next pending bit */
                bitmap_clear(&pending, intr, 1);
@@ -509,13 +515,14 @@ static struct irq_chip gic_all_vpes_local_irq_controller = {
 
 static void __gic_irq_dispatch(void)
 {
-       gic_handle_local_int();
-       gic_handle_shared_int();
+       gic_handle_local_int(false);
+       gic_handle_shared_int(false);
 }
 
 static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
 {
-       __gic_irq_dispatch();
+       gic_handle_local_int(true);
+       gic_handle_shared_int(true);
 }
 
 #ifdef CONFIG_MIPS_GIC_IPI
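
The gic change threads a `chained` flag through the dispatchers because a chained handler already runs inside do_IRQ(); re-entering do_IRQ() for each cascaded interrupt would nest the interrupt entry/exit accounting, so cascaded interrupts must be delivered with generic_handle_irq() instead. A userspace model of the split, with all names hypothetical:

    #include <stdio.h>

    static int irq_depth;   /* models irq_enter()/irq_exit() nesting */

    static void handle(int virq)
    {
        printf("virq %d at depth %d\n", virq, irq_depth);
    }

    /* do_IRQ(): full entry accounting around the handler. */
    static void do_irq(int virq)
    {
        irq_depth++;
        handle(virq);
        irq_depth--;
    }

    /* Dispatch cascaded interrupts; accounting was already done by
     * the parent handler when 'chained' is set. */
    static void dispatch(int virq, int chained)
    {
        if (chained)
            handle(virq);   /* generic_handle_irq() analogue */
        else
            do_irq(virq);
    }

    int main(void)
    {
        dispatch(5, 0);     /* direct: runs at depth 1 */
        irq_depth++;        /* pretend the parent irq is on the stack */
        dispatch(7, 1);     /* chained: stays at depth 1, not 2 */
        irq_depth--;
        return 0;
    }
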
index 4a9ce5b50c5bba33b7428a0b67b88d26e31c4067..6b2b582433bde95062e85d17403e4a505c5a4ef9 100644 (file)
@@ -104,7 +104,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
        irqd_set_trigger_type(data, flow_type);
        irq_setup_alt_chip(data, flow_type);
 
-       for (i = 0; i <= gc->num_ct; i++, ct++)
+       for (i = 0; i < gc->num_ct; i++, ct++)
                if (ct->type & flow_type)
                        ctrl_off = ct->regs.type;
 
index 7dc93aa004c86cfa988993d53164ea1d665aff97..312ffd3d00177ca5a5e21393377c760e9bdec91e 100644 (file)
@@ -173,7 +173,7 @@ static void unmap_switcher(void)
 bool lguest_address_ok(const struct lguest *lg,
                       unsigned long addr, unsigned long len)
 {
-       return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
+       return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
 }
 
 /*
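
The rewritten lguest predicate compares whole byte ranges against the pfn limit instead of comparing page numbers after integer division, which got the boundary case wrong, and it keeps the `addr+len >= addr` guard against unsigned wrap-around. A runnable sketch of the corrected check, with a hypothetical 4 KiB page size and 16-page limit:

    #include <assert.h>
    #include <stdbool.h>

    #define PAGE_SIZE 4096ul

    static bool address_ok(unsigned long pfn_limit,
                           unsigned long addr, unsigned long len)
    {
        /* Whole range below the limit, and no unsigned wrap-around. */
        return addr + len <= pfn_limit * PAGE_SIZE && addr + len >= addr;
    }

    int main(void)
    {
        unsigned long limit = 16;       /* 16 pages = 64 KiB */

        assert(address_ok(limit, 0, 16 * PAGE_SIZE));
        assert(!address_ok(limit, 16 * PAGE_SIZE - 1, 2)); /* crosses */
        assert(!address_ok(limit, ~0ul, 2));               /* wraps   */
        return 0;
    }
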
index 63953477a07c36e771a32d5bde686bd0f05890f1..eff7bdd7731d5e437d3b83ca4803ac8c03bac6b6 100644 (file)
@@ -429,9 +429,11 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
                /* blk-mq request-based interface */
                *__clone = blk_get_request(bdev_get_queue(bdev),
                                           rq_data_dir(rq), GFP_ATOMIC);
-               if (IS_ERR(*__clone))
+               if (IS_ERR(*__clone)) {
                        /* ENOMEM, requeue */
+                       clear_mapinfo(m, map_context);
                        return r;
+               }
                (*__clone)->bio = (*__clone)->biotail = NULL;
                (*__clone)->rq_disk = bdev->bd_disk;
                (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
index d9b00b8565c6dc1a36f5a3d863baa370126da593..16ba55ad708992f7e942b2f6ce2048d12be5c1b6 100644 (file)
@@ -820,6 +820,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
 }
 EXPORT_SYMBOL(dm_consume_args);
 
+static bool __table_type_request_based(unsigned table_type)
+{
+       return (table_type == DM_TYPE_REQUEST_BASED ||
+               table_type == DM_TYPE_MQ_REQUEST_BASED);
+}
+
 static int dm_table_set_type(struct dm_table *t)
 {
        unsigned i;
@@ -852,8 +858,7 @@ static int dm_table_set_type(struct dm_table *t)
                 * Determine the type from the live device.
                 * Default to bio-based if device is new.
                 */
-               if (live_md_type == DM_TYPE_REQUEST_BASED ||
-                   live_md_type == DM_TYPE_MQ_REQUEST_BASED)
+               if (__table_type_request_based(live_md_type))
                        request_based = 1;
                else
                        bio_based = 1;
@@ -903,7 +908,7 @@ static int dm_table_set_type(struct dm_table *t)
                        }
                t->type = DM_TYPE_MQ_REQUEST_BASED;
 
-       } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) {
+       } else if (list_empty(devices) && __table_type_request_based(live_md_type)) {
                /* inherit live MD type */
                t->type = live_md_type;
 
@@ -925,10 +930,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
 
 bool dm_table_request_based(struct dm_table *t)
 {
-       unsigned table_type = dm_table_get_type(t);
-
-       return (table_type == DM_TYPE_REQUEST_BASED ||
-               table_type == DM_TYPE_MQ_REQUEST_BASED);
+       return __table_type_request_based(dm_table_get_type(t));
 }
 
 bool dm_table_mq_request_based(struct dm_table *t)
index a930b72314ac985da702f8b47a8054a75b2e2ba8..2caf492890d64b27a0a88f24f4f04d1778448d9a 100644 (file)
@@ -1082,13 +1082,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
        dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone, bool must_be_mapped)
+static void free_rq_clone(struct request *clone)
 {
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
 
-       WARN_ON_ONCE(must_be_mapped && !clone->q);
-
        blk_rq_unprep_clone(clone);
 
        if (md->type == DM_TYPE_MQ_REQUEST_BASED)
@@ -1132,7 +1130,7 @@ static void dm_end_request(struct request *clone, int error)
                        rq->sense_len = clone->sense_len;
        }
 
-       free_rq_clone(clone, true);
+       free_rq_clone(clone);
        if (!rq->q->mq_ops)
                blk_end_request_all(rq, error);
        else
@@ -1151,7 +1149,7 @@ static void dm_unprep_request(struct request *rq)
        }
 
        if (clone)
-               free_rq_clone(clone, false);
+               free_rq_clone(clone);
 }
 
 /*
@@ -1164,6 +1162,7 @@ static void old_requeue_request(struct request *rq)
 
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
+       blk_run_queue_async(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -1724,8 +1723,7 @@ static int dm_merge_bvec(struct request_queue *q,
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_live_table_fast(md);
        struct dm_target *ti;
-       sector_t max_sectors;
-       int max_size = 0;
+       sector_t max_sectors, max_size = 0;
 
        if (unlikely(!map))
                goto out;
@@ -1740,8 +1738,16 @@ static int dm_merge_bvec(struct request_queue *q,
        max_sectors = min(max_io_len(bvm->bi_sector, ti),
                          (sector_t) queue_max_sectors(q));
        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-       if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
-               max_size = 0;
+
+       /*
+        * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
+        * to the targets' merge function since it holds sectors not bytes).
+        * Just doing this as an interim fix for stable@ because the more
+        * comprehensive cleanup of switching to sector_t will impact every
+        * DM target that implements a ->merge hook.
+        */
+       if (max_size > INT_MAX)
+               max_size = INT_MAX;
 
        /*
         * merge_bvec_fn() returns number of bytes
@@ -1749,7 +1755,7 @@ static int dm_merge_bvec(struct request_queue *q,
         * max is precomputed maximal io size
         */
        if (max_size && ti->type->merge)
-               max_size = ti->type->merge(ti, bvm, biovec, max_size);
+               max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
        /*
         * If the target doesn't support merge method and some of the devices
         * provided their merge_bvec method (we know this by looking for the
@@ -1971,8 +1977,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
                        dm_kill_unmapped_request(rq, r);
                        return r;
                }
-               if (IS_ERR(clone))
-                       return DM_MAPIO_REQUEUE;
+               if (r != DM_MAPIO_REMAPPED)
+                       return r;
                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
@@ -2753,13 +2759,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
                /* clone request is allocated at the end of the pdu */
                tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
-               if (!clone_rq(rq, md, tio, GFP_ATOMIC))
-                       return BLK_MQ_RQ_QUEUE_BUSY;
+               (void) clone_rq(rq, md, tio, GFP_ATOMIC);
                queue_kthread_work(&md->kworker, &tio->work);
        } else {
                /* Direct call is fine since .queue_rq allows allocations */
-               if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-                       dm_requeue_unmapped_original_request(md, rq);
+               if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+                       /* Undo dm_start_request() before requeuing */
+                       rq_completed(md, rq_data_dir(rq), false);
+                       return BLK_MQ_RQ_QUEUE_BUSY;
+               }
        }
 
        return BLK_MQ_RQ_QUEUE_OK;
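
In dm_merge_bvec() the byte count is now held in a sector_t (unsigned 64-bit), so the old `max_size < 0` test could never fire; the stop-gap instead clamps to INT_MAX before handing the value to the int-based ->merge hook, where an unclamped cast would go negative. A sketch of the truncation hazard, with hypothetical names:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Models a ->merge hook that still takes an int byte count. */
    static int merge_hook(int max_size) { return max_size; }

    int main(void)
    {
        uint64_t max_size = 3ull * 1024 * 1024 * 1024;  /* 3 GiB */

        /* Without the clamp, (int)max_size would be negative here. */
        if (max_size > INT_MAX)
            max_size = INT_MAX;

        printf("merge sees %d bytes\n", merge_hook((int)max_size));
        return 0;
    }
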
index 593a02476c781a2b5ee7e491b9188a108630b506..4dbed4a67aaf40e3c04bde925870c24d13cd1b4e 100644 (file)
@@ -3834,7 +3834,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                                err = -EBUSY;
                }
                spin_unlock(&mddev->lock);
-               return err;
+               return err ?: len;
        }
        err = mddev_lock(mddev);
        if (err)
@@ -4211,34 +4211,36 @@ action_store(struct mddev *mddev, const char *page, size_t len)
        if (!mddev->pers || !mddev->pers->sync_request)
                return -EINVAL;
 
-       if (cmd_match(page, "frozen"))
-               set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-       else
-               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 
        if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
-               flush_workqueue(md_misc_wq);
-               if (mddev->sync_thread) {
-                       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                       if (mddev_lock(mddev) == 0) {
+               if (cmd_match(page, "frozen"))
+                       set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+               else
+                       clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+               if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
+                   mddev_lock(mddev) == 0) {
+                       flush_workqueue(md_misc_wq);
+                       if (mddev->sync_thread) {
+                               set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                                md_reap_sync_thread(mddev);
-                               mddev_unlock(mddev);
                        }
+                       mddev_unlock(mddev);
                }
        } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
                   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
                return -EBUSY;
        else if (cmd_match(page, "resync"))
-               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
        else if (cmd_match(page, "recover")) {
+               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
-               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        } else if (cmd_match(page, "reshape")) {
                int err;
                if (mddev->pers->start_reshape == NULL)
                        return -EINVAL;
                err = mddev_lock(mddev);
                if (!err) {
+                       clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                        err = mddev->pers->start_reshape(mddev);
                        mddev_unlock(mddev);
                }
@@ -4250,6 +4252,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
                        set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                else if (!cmd_match(page, "repair"))
                        return -EINVAL;
+               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
                set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        }
@@ -8259,6 +8262,7 @@ void md_reap_sync_thread(struct mddev *mddev)
        if (mddev_is_clustered(mddev))
                md_cluster_ops->metadata_update_finish(mddev);
        clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+       clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
index e793ab6b35705e0ed1ad6904ebe9353b6dbf6fd6..f55c3f35b7463141086afb727785c775c5185d76 100644 (file)
@@ -4156,6 +4156,7 @@ static int raid10_start_reshape(struct mddev *mddev)
 
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+       clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
 
index b9f2b9cc60607e29c2c240b2516e5c8af49091d3..b6793d2e051f3b278405f236e6623980bcdf1d04 100644 (file)
@@ -749,6 +749,7 @@ static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 static bool stripe_can_batch(struct stripe_head *sh)
 {
        return test_bit(STRIPE_BATCH_READY, &sh->state) &&
+               !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
                is_full_stripe_write(sh);
 }
 
@@ -837,6 +838,15 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
                    < IO_THRESHOLD)
                        md_wakeup_thread(conf->mddev->thread);
 
+       if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) {
+               int seq = sh->bm_seq;
+               if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) &&
+                   sh->batch_head->bm_seq > seq)
+                       seq = sh->batch_head->bm_seq;
+               set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state);
+               sh->batch_head->bm_seq = seq;
+       }
+
        atomic_inc(&sh->count);
 unlock_out:
        unlock_two_stripes(head, sh);
@@ -2987,14 +2997,32 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
                (unsigned long long)(*bip)->bi_iter.bi_sector,
                (unsigned long long)sh->sector, dd_idx);
-       spin_unlock_irq(&sh->stripe_lock);
 
        if (conf->mddev->bitmap && firstwrite) {
+               /* Cannot hold spinlock over bitmap_startwrite,
+                * but must ensure this isn't added to a batch until
+                * we have added to the bitmap and set bm_seq.
+                * So set STRIPE_BITMAP_PENDING to prevent
+                * batching.
+                * If multiple add_stripe_bio() calls race here they
+                * must all set STRIPE_BITMAP_PENDING.  So only the first one
+                * to complete "bitmap_startwrite" gets to set
+                * STRIPE_BIT_DELAY.  This is important as once a stripe
+                * is added to a batch, STRIPE_BIT_DELAY cannot be changed
+                * any more.
+                */
+               set_bit(STRIPE_BITMAP_PENDING, &sh->state);
+               spin_unlock_irq(&sh->stripe_lock);
                bitmap_startwrite(conf->mddev->bitmap, sh->sector,
                                  STRIPE_SECTORS, 0);
-               sh->bm_seq = conf->seq_flush+1;
-               set_bit(STRIPE_BIT_DELAY, &sh->state);
+               spin_lock_irq(&sh->stripe_lock);
+               clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
+               if (!sh->batch_head) {
+                       sh->bm_seq = conf->seq_flush+1;
+                       set_bit(STRIPE_BIT_DELAY, &sh->state);
+               }
        }
+       spin_unlock_irq(&sh->stripe_lock);
 
        if (stripe_can_batch(sh))
                stripe_add_to_batch_list(conf, sh);
@@ -3392,6 +3420,8 @@ static void handle_stripe_fill(struct stripe_head *sh,
        set_bit(STRIPE_HANDLE, &sh->state);
 }
 
+static void break_stripe_batch_list(struct stripe_head *head_sh,
+                                   unsigned long handle_flags);
 /* handle_stripe_clean_event
  * any written block on an uptodate or failed drive can be returned.
  * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
@@ -3405,7 +3435,6 @@ static void handle_stripe_clean_event(struct r5conf *conf,
        int discard_pending = 0;
        struct stripe_head *head_sh = sh;
        bool do_endio = false;
-       int wakeup_nr = 0;
 
        for (i = disks; i--; )
                if (sh->dev[i].written) {
@@ -3494,44 +3523,8 @@ unhash:
                if (atomic_dec_and_test(&conf->pending_full_writes))
                        md_wakeup_thread(conf->mddev->thread);
 
-       if (!head_sh->batch_head || !do_endio)
-               return;
-       for (i = 0; i < head_sh->disks; i++) {
-               if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
-                       wakeup_nr++;
-       }
-       while (!list_empty(&head_sh->batch_list)) {
-               int i;
-               sh = list_first_entry(&head_sh->batch_list,
-                                     struct stripe_head, batch_list);
-               list_del_init(&sh->batch_list);
-
-               set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
-                             head_sh->state & ~((1 << STRIPE_ACTIVE) |
-                                                (1 << STRIPE_PREREAD_ACTIVE) |
-                                                STRIPE_EXPAND_SYNC_FLAG));
-               sh->check_state = head_sh->check_state;
-               sh->reconstruct_state = head_sh->reconstruct_state;
-               for (i = 0; i < sh->disks; i++) {
-                       if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
-                               wakeup_nr++;
-                       sh->dev[i].flags = head_sh->dev[i].flags;
-               }
-
-               spin_lock_irq(&sh->stripe_lock);
-               sh->batch_head = NULL;
-               spin_unlock_irq(&sh->stripe_lock);
-               if (sh->state & STRIPE_EXPAND_SYNC_FLAG)
-                       set_bit(STRIPE_HANDLE, &sh->state);
-               release_stripe(sh);
-       }
-
-       spin_lock_irq(&head_sh->stripe_lock);
-       head_sh->batch_head = NULL;
-       spin_unlock_irq(&head_sh->stripe_lock);
-       wake_up_nr(&conf->wait_for_overlap, wakeup_nr);
-       if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG)
-               set_bit(STRIPE_HANDLE, &head_sh->state);
+       if (head_sh->batch_head && do_endio)
+               break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
 }
 
 static void handle_stripe_dirtying(struct r5conf *conf,
@@ -4172,9 +4165,13 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 
 static int clear_batch_ready(struct stripe_head *sh)
 {
+       /* Return '1' if this is a member of batch, or
+        * '0' if it is a lone stripe or a head which can now be
+        * handled.
+        */
        struct stripe_head *tmp;
        if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
-               return 0;
+               return (sh->batch_head && sh->batch_head != sh);
        spin_lock(&sh->stripe_lock);
        if (!sh->batch_head) {
                spin_unlock(&sh->stripe_lock);
@@ -4202,38 +4199,65 @@ static int clear_batch_ready(struct stripe_head *sh)
        return 0;
 }
 
-static void check_break_stripe_batch_list(struct stripe_head *sh)
+static void break_stripe_batch_list(struct stripe_head *head_sh,
+                                   unsigned long handle_flags)
 {
-       struct stripe_head *head_sh, *next;
+       struct stripe_head *sh, *next;
        int i;
-
-       if (!test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
-               return;
-
-       head_sh = sh;
+       int do_wakeup = 0;
 
        list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
 
                list_del_init(&sh->batch_list);
 
-               set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
-                             head_sh->state & ~((1 << STRIPE_ACTIVE) |
-                                                (1 << STRIPE_PREREAD_ACTIVE) |
-                                                (1 << STRIPE_DEGRADED) |
-                                                STRIPE_EXPAND_SYNC_FLAG));
+               WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
+                                         (1 << STRIPE_SYNCING) |
+                                         (1 << STRIPE_REPLACED) |
+                                         (1 << STRIPE_PREREAD_ACTIVE) |
+                                         (1 << STRIPE_DELAYED) |
+                                         (1 << STRIPE_BIT_DELAY) |
+                                         (1 << STRIPE_FULL_WRITE) |
+                                         (1 << STRIPE_BIOFILL_RUN) |
+                                         (1 << STRIPE_COMPUTE_RUN) |
+                                         (1 << STRIPE_OPS_REQ_PENDING) |
+                                         (1 << STRIPE_DISCARD) |
+                                         (1 << STRIPE_BATCH_READY) |
+                                         (1 << STRIPE_BATCH_ERR) |
+                                         (1 << STRIPE_BITMAP_PENDING)));
+               WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
+                                             (1 << STRIPE_REPLACED)));
+
+               set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+                                           (1 << STRIPE_DEGRADED)),
+                             head_sh->state & (1 << STRIPE_INSYNC));
+
                sh->check_state = head_sh->check_state;
                sh->reconstruct_state = head_sh->reconstruct_state;
-               for (i = 0; i < sh->disks; i++)
+               for (i = 0; i < sh->disks; i++) {
+                       if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+                               do_wakeup = 1;
                        sh->dev[i].flags = head_sh->dev[i].flags &
                                (~((1 << R5_WriteError) | (1 << R5_Overlap)));
-
+               }
                spin_lock_irq(&sh->stripe_lock);
                sh->batch_head = NULL;
                spin_unlock_irq(&sh->stripe_lock);
-
-               set_bit(STRIPE_HANDLE, &sh->state);
+               if (handle_flags == 0 ||
+                   sh->state & handle_flags)
+                       set_bit(STRIPE_HANDLE, &sh->state);
                release_stripe(sh);
        }
+       spin_lock_irq(&head_sh->stripe_lock);
+       head_sh->batch_head = NULL;
+       spin_unlock_irq(&head_sh->stripe_lock);
+       for (i = 0; i < head_sh->disks; i++)
+               if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
+                       do_wakeup = 1;
+       if (head_sh->state & handle_flags)
+               set_bit(STRIPE_HANDLE, &head_sh->state);
+
+       if (do_wakeup)
+               wake_up(&head_sh->raid_conf->wait_for_overlap);
 }
 
 static void handle_stripe(struct stripe_head *sh)
@@ -4258,7 +4282,8 @@ static void handle_stripe(struct stripe_head *sh)
                return;
        }
 
-       check_break_stripe_batch_list(sh);
+       if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
+               break_stripe_batch_list(sh, 0);
 
        if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
                spin_lock(&sh->stripe_lock);
@@ -4312,6 +4337,7 @@ static void handle_stripe(struct stripe_head *sh)
        if (s.failed > conf->max_degraded) {
                sh->check_state = 0;
                sh->reconstruct_state = 0;
+               break_stripe_batch_list(sh, 0);
                if (s.to_read+s.to_write+s.written)
                        handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
                if (s.syncing + s.replacing)
@@ -7328,6 +7354,7 @@ static int raid5_start_reshape(struct mddev *mddev)
 
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+       clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
        mddev->sync_thread = md_register_thread(md_do_sync, mddev,
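
The raid5 race fix follows a common flag protocol: set STRIPE_BITMAP_PENDING under stripe_lock, drop the lock for the sleeping bitmap_startwrite(), retake it, and only then decide whether STRIPE_BIT_DELAY may still be set; stripe_can_batch() refuses any stripe with the pending bit, so a stripe cannot join a batch mid-update. A single-threaded model of that protocol, with hypothetical flag values:

    #include <stdbool.h>
    #include <stdio.h>

    enum { BATCH_READY = 1, BITMAP_PENDING = 2, BIT_DELAY = 4 };

    struct stripe { unsigned state; };

    static bool can_batch(const struct stripe *sh)
    {
        /* Mirrors stripe_can_batch(): never batch while a bitmap
         * update is still in flight. */
        return (sh->state & BATCH_READY) && !(sh->state & BITMAP_PENDING);
    }

    int main(void)
    {
        struct stripe sh = { .state = BATCH_READY };

        sh.state |= BITMAP_PENDING;     /* before dropping the lock */
        printf("batchable: %d\n", can_batch(&sh));  /* 0 */

        /* ... bitmap_startwrite() runs without the lock here ... */

        sh.state &= ~BITMAP_PENDING;    /* after retaking the lock */
        sh.state |= BIT_DELAY;
        printf("batchable: %d\n", can_batch(&sh));  /* 1 */
        return 0;
    }
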
index 7dc0dd86074b1702276ccb51ba166a38d5d0f7e3..896d603ad0da964d2c45f22039d8b733f0bef26e 100644 (file)
@@ -337,9 +337,12 @@ enum {
        STRIPE_ON_RELEASE_LIST,
        STRIPE_BATCH_READY,
        STRIPE_BATCH_ERR,
+       STRIPE_BITMAP_PENDING,  /* Being added to bitmap, don't add
+                                * to batch yet.
+                                */
 };
 
-#define STRIPE_EXPAND_SYNC_FLAG \
+#define STRIPE_EXPAND_SYNC_FLAGS \
        ((1 << STRIPE_EXPAND_SOURCE) |\
        (1 << STRIPE_EXPAND_READY) |\
        (1 << STRIPE_EXPANDING) |\
index ae498b53ee4042ef3e39e6f77a7272cffe4abe74..46e3840c7a37392402deb53a7a9eb2cb7b8b27b6 100644 (file)
@@ -431,6 +431,10 @@ int da9052_adc_read_temp(struct da9052 *da9052)
 EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
 
 static const struct mfd_cell da9052_subdev_info[] = {
+       {
+               .name = "da9052-regulator",
+               .id = 0,
+       },
        {
                .name = "da9052-regulator",
                .id = 1,
@@ -483,10 +487,6 @@ static const struct mfd_cell da9052_subdev_info[] = {
                .name = "da9052-regulator",
                .id = 13,
        },
-       {
-               .name = "da9052-regulator",
-               .id = 14,
-       },
        {
                .name = "da9052-onkey",
        },
index db84ddcfec8464191a3edcccfd87c869ac1c5a7c..9fd6c69a8bac3c77d1c0c6e99eb4f3644561f78a 100644 (file)
@@ -423,7 +423,7 @@ static void xgbe_tx_timer(unsigned long data)
        if (napi_schedule_prep(napi)) {
                /* Disable Tx and Rx interrupts */
                if (pdata->per_channel_irq)
-                       disable_irq(channel->dma_irq);
+                       disable_irq_nosync(channel->dma_irq);
                else
                        xgbe_disable_rx_tx_ints(pdata);
 
index 77363d6805321534a582e579552f46e254737e25..a3b1c07ae0af0935f3026ba8a56e21512e238e36 100644 (file)
@@ -2464,6 +2464,7 @@ err_out_powerdown:
        ssb_bus_may_powerdown(sdev->bus);
 
 err_out_free_dev:
+       netif_napi_del(&bp->napi);
        free_netdev(dev);
 
 out:
@@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev)
                b44_unregister_phy_one(bp);
        ssb_device_disable(sdev, 0);
        ssb_bus_may_powerdown(sdev->bus);
+       netif_napi_del(&bp->napi);
        free_netdev(dev);
        ssb_pcihost_set_power_state(sdev, PCI_D3hot);
        ssb_set_drvdata(sdev, NULL);
index a3b0f7a0c61e0d6ffeefcd88ae81ab751554e085..1f82a04ce01a8468e7d8dde208babdea4220ab88 100644 (file)
@@ -1774,7 +1774,7 @@ struct bnx2x {
        int                     stats_state;
 
        /* used for synchronization of concurrent threads statistics handling */
-       struct mutex            stats_lock;
+       struct semaphore        stats_lock;
 
        /* used by dmae command loader */
        struct dmae_command     stats_dmae;
index fd52ce95127ef98b7c0687594024357b05c1a848..33501bcddc48eb1f6157a08e3e3d1e08dc087c25 100644 (file)
@@ -12054,7 +12054,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
        mutex_init(&bp->port.phy_mutex);
        mutex_init(&bp->fw_mb_mutex);
        mutex_init(&bp->drv_info_mutex);
-       mutex_init(&bp->stats_lock);
+       sema_init(&bp->stats_lock, 1);
        bp->drv_info_mng_owner = false;
 
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
@@ -13690,9 +13690,10 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
        cancel_delayed_work_sync(&bp->sp_task);
        cancel_delayed_work_sync(&bp->period_task);
 
-       mutex_lock(&bp->stats_lock);
-       bp->stats_state = STATS_STATE_DISABLED;
-       mutex_unlock(&bp->stats_lock);
+       if (!down_timeout(&bp->stats_lock, HZ / 10)) {
+               bp->stats_state = STATS_STATE_DISABLED;
+               up(&bp->stats_lock);
+       }
 
        bnx2x_save_statistics(bp);
 
index 266b055c2360af759c7f78395636d541210e5b9d..69d699f0730a3bd4d8980607e0a36cd8da461f1e 100644 (file)
@@ -1372,19 +1372,23 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
         * that context in case someone is in the middle of a transition.
         * For other events, wait a bit until lock is taken.
         */
-       if (!mutex_trylock(&bp->stats_lock)) {
+       if (down_trylock(&bp->stats_lock)) {
                if (event == STATS_EVENT_UPDATE)
                        return;
 
                DP(BNX2X_MSG_STATS,
                   "Unlikely stats' lock contention [event %d]\n", event);
-               mutex_lock(&bp->stats_lock);
+               if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
+                       BNX2X_ERR("Failed to take stats lock [event %d]\n",
+                                 event);
+                       return;
+               }
        }
 
        bnx2x_stats_stm[state][event].action(bp);
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
 
-       mutex_unlock(&bp->stats_lock);
+       up(&bp->stats_lock);
 
        if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1970,7 +1974,11 @@ int bnx2x_stats_safe_exec(struct bnx2x *bp,
        /* Wait for statistics to end [while blocking further requests],
         * then run supplied function 'safely'.
         */
-       mutex_lock(&bp->stats_lock);
+       rc = down_timeout(&bp->stats_lock, HZ / 10);
+       if (unlikely(rc)) {
+               BNX2X_ERR("Failed to take statistics lock for safe execution\n");
+               goto out_no_lock;
+       }
 
        bnx2x_stats_comp(bp);
        while (bp->stats_pending && cnt--)
@@ -1988,7 +1996,7 @@ out:
        /* No need to restart statistics - if they're enabled, the timer
         * will restart the statistics.
         */
-       mutex_unlock(&bp->stats_lock);
-
+       up(&bp->stats_lock);
+out_no_lock:
        return rc;
 }
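Replacing the stats mutex with a semaphore lets callers give up after a bounded wait instead of blocking indefinitely (the EEH and safe-exec paths above use HZ/10). A minimal sketch of the trylock-then-timeout pattern, with hypothetical names:

	/* Sketch only: 'lock' guards some stats state; down_trylock() returns
	 * nonzero on contention, down_timeout() returns an error on expiry.
	 */
	static int demo_stats_op(struct semaphore *lock)
	{
		if (down_trylock(lock)) {
			if (down_timeout(lock, HZ / 10))
				return -EBUSY;	/* bounded wait failed; bail out */
		}
		/* ... critical section: stats state may be modified here ... */
		up(lock);
		return 0;
	}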
index e7651b3c6c5767f7609115ef0430c13aac8d17a9..420949cc55aab6349b75c33f0c4f061aa384d537 100644 (file)
@@ -299,9 +299,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
                        phy_name = "external RGMII (no delay)";
                else
                        phy_name = "external RGMII (TX delay)";
-               reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
-               reg |= RGMII_MODE_EN | id_mode_dis;
-               bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
                bcmgenet_sys_writel(priv,
                                    PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
                break;
@@ -310,6 +307,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
                return -EINVAL;
        }
 
+       /* This is an external PHY (xMII), so we need to enable the RGMII
+        * block for the interface to work
+        */
+       if (priv->ext_phy) {
+               reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+               reg |= RGMII_MODE_EN | id_mode_dis;
+               bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+       }
+
        if (init)
                dev_info(kdev, "configuring instance for %s\n", phy_name);
 
index 594a2ab36d3175de2633490eec1e0395dbb74e59..68f3c13c9ef6d992ac7eadde882c16b51375d6e8 100644 (file)
@@ -2414,7 +2414,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
        if (status == BFA_STATUS_OK)
                bfa_ioc_lpu_start(ioc);
        else
-               bfa_nw_iocpf_timeout(ioc);
+               bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
 
        return status;
 }
@@ -3029,7 +3029,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
        }
 
        if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
-               bfa_nw_iocpf_timeout(ioc);
+               bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
        } else {
                ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
                mod_timer(&ioc->iocpf_timer, jiffies +
index 37072a83f9d6d0afb29de683051e13af94a78fd8..caae6cb2bc1a4528f4d97bd8e1e11adf074bc81e 100644 (file)
@@ -3701,10 +3701,6 @@ bnad_pci_probe(struct pci_dev *pdev,
        setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
                                ((unsigned long)bnad));
 
-       /* Now start the timer before calling IOC */
-       mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
-                 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
-
        /*
         * Start the chip
         * If the call back comes with error, we bail out.
index ebf462d8082f79373c1ea234e4f3034a16c53e73..badea368bdc89621927101dc0a79504765b87248 100644 (file)
@@ -30,6 +30,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
                        u32 *bfi_image_size, char *fw_name)
 {
        const struct firmware *fw;
+       u32 n;
 
        if (request_firmware(&fw, fw_name, &pdev->dev)) {
                pr_alert("Can't locate firmware %s\n", fw_name);
@@ -40,6 +41,12 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
        *bfi_image_size = fw->size/sizeof(u32);
        bfi_fw = fw;
 
+       /* Convert the loaded firmware to host order, as it is stored in
+        * the file as a sequence of LE32 integers.
+        */
+       for (n = 0; n < *bfi_image_size; n++)
+               le32_to_cpus(*bfi_image + n);
+
        return *bfi_image;
 error:
        return NULL;
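The image on disk is a sequence of little-endian 32-bit words, so each word is swapped in place on big-endian hosts; le32_to_cpus() compiles to nothing on little-endian ones. A standalone restatement of the loop (names hypothetical):

	/* Sketch: in-place LE32 -> host-order conversion of a firmware image. */
	static void demo_fw_to_cpu(u32 *image, u32 nwords)
	{
		u32 n;

		for (n = 0; n < nwords; n++)
			le32_to_cpus(image + n);	/* byte-swap only on big-endian */
	}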
index 28d9ca675a274f9876473bcce7e6995a14e1289e..68d47b196daec3d3c5d0b8af19f8d167735e1e79 100644 (file)
@@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev,
 {
        struct enic *enic = netdev_priv(netdev);
        struct vnic_devcmd_fw_info *fw_info;
+       int err;
 
-       enic_dev_fw_info(enic, &fw_info);
+       err = enic_dev_fw_info(enic, &fw_info);
+       /* Return only when pci_zalloc_consistent fails in vnic_dev_fw_info.
+        * For other failures, like a devcmd failure, we return the
+        * previously recorded info.
+        */
+       if (err == -ENOMEM)
+               return;
 
        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
@@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
        struct enic *enic = netdev_priv(netdev);
        struct vnic_stats *vstats;
        unsigned int i;
-
-       enic_dev_stats_dump(enic, &vstats);
+       int err;
+
+       err = enic_dev_stats_dump(enic, &vstats);
+       /* Return only when pci_zalloc_consistent fails in vnic_dev_stats_dump.
+        * For other failures, like a devcmd failure, we return the
+        * previously recorded stats.
+        */
+       if (err == -ENOMEM)
+               return;
 
        for (i = 0; i < enic_n_tx_stats; i++)
                *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
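These enic hunks (and the enic_main.c one below) apply the same rule: keep reporting the last recorded snapshot when the device query fails, and return early only when the backing DMA buffer could not be allocated at all. As a sketch, with query() and cached as placeholders:

	err = query(enic, &cached);
	if (err == -ENOMEM)	/* no buffer exists; nothing to report */
		return;
	/* any other error: fall through and report the stale cached data */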
index 204bd182473bceaaabaa5b1eba5ed618de751808..eadae1b412c652974dde24a9a76c5d74a8c3fa29 100644 (file)
@@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
 {
        struct enic *enic = netdev_priv(netdev);
        struct vnic_stats *stats;
+       int err;
 
-       enic_dev_stats_dump(enic, &stats);
+       err = enic_dev_stats_dump(enic, &stats);
+       /* Return only when pci_zalloc_consistent fails in vnic_dev_stats_dump.
+        * For other failures, like a devcmd failure, we return the
+        * previously recorded stats.
+        */
+       if (err == -ENOMEM)
+               return net_stats;
 
        net_stats->tx_packets = stats->tx.tx_frames_ok;
        net_stats->tx_bytes = stats->tx.tx_bytes_ok;
@@ -1407,6 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
                 */
                enic_calc_int_moderation(enic, &enic->rq[rq]);
 
+       enic_poll_unlock_napi(&enic->rq[rq]);
        if (work_done < work_to_do) {
 
                /* Some work done, but not enough to stay in polling,
@@ -1418,7 +1426,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
                        enic_set_int_moderation(enic, &enic->rq[rq]);
                vnic_intr_unmask(&enic->intr[intr]);
        }
-       enic_poll_unlock_napi(&enic->rq[rq]);
 
        return work_done;
 }
index 36a2ed606c911f21355360fad81eb39b18162c59..c4b2183bf352fb2a1881001777df91857c2d1f79 100644 (file)
@@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq,
        struct vnic_rq_buf *buf;
        u32 fetch_index;
        unsigned int count = rq->ring.desc_count;
+       int i;
 
        buf = rq->to_clean;
 
-       while (vnic_rq_desc_used(rq) > 0) {
-
+       for (i = 0; i < rq->ring.desc_count; i++) {
                (*buf_clean)(rq, buf);
-
-               buf = rq->to_clean = buf->next;
-               rq->ring.desc_avail++;
+               buf = buf->next;
        }
+       rq->ring.desc_avail = rq->ring.desc_count - 1;
 
        /* Use current fetch_index as the ring starting point */
        fetch_index = ioread32(&rq->ctrl->fetch_index);
index fb140faeafb1cbda612cd11a9a1aac04e936c4a3..c5e1d0ac75f909f843dd0397ad41b85eeb26a164 100644 (file)
@@ -1720,9 +1720,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
        total_size = buf_len;
 
        get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
-       get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
-                                             get_fat_cmd.size,
-                                             &get_fat_cmd.dma);
+       get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                            get_fat_cmd.size,
+                                            &get_fat_cmd.dma, GFP_ATOMIC);
        if (!get_fat_cmd.va) {
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while reading FAT data\n");
@@ -1767,8 +1767,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
                log_offset += buf_size;
        }
 err:
-       pci_free_consistent(adapter->pdev, get_fat_cmd.size,
-                           get_fat_cmd.va, get_fat_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
+                         get_fat_cmd.va, get_fat_cmd.dma);
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
@@ -2215,12 +2215,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
                return -EINVAL;
 
        cmd.size = sizeof(struct be_cmd_resp_port_type);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
                return -ENOMEM;
        }
-       memset(cmd.va, 0, cmd.size);
 
        spin_lock_bh(&adapter->mcc_lock);
 
@@ -2245,7 +2245,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
        }
 err:
        spin_unlock_bh(&adapter->mcc_lock);
-       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
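The rest of this file follows the same conversion from the legacy PCI DMA helpers to the generic DMA API; since dma_zalloc_coherent() returns a zeroed buffer, explicit memsets after allocation (as above) can be dropped. The rough mapping, as a sketch rather than a mechanical rule:

	/* Old: device-relative helpers, implicitly GFP_ATOMIC, not zeroed */
	va = pci_alloc_consistent(pdev, size, &dma);
	memset(va, 0, size);
	pci_free_consistent(pdev, size, va, dma);

	/* New: generic DMA API, explicit gfp flags, buffer already zeroed */
	va = dma_zalloc_coherent(&pdev->dev, size, &dma, GFP_ATOMIC);
	dma_free_coherent(&pdev->dev, size, va, dma);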
 
@@ -2720,7 +2720,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
                goto err;
        }
        cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
@@ -2754,7 +2755,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
                                BE_SUPPORTED_SPEED_1GBPS;
                }
        }
-       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -2805,8 +2806,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 
        memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
        attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
-       attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
-                                             &attribs_cmd.dma);
+       attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                            attribs_cmd.size,
+                                            &attribs_cmd.dma, GFP_ATOMIC);
        if (!attribs_cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
@@ -2833,8 +2835,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 err:
        mutex_unlock(&adapter->mbox_lock);
        if (attribs_cmd.va)
-               pci_free_consistent(adapter->pdev, attribs_cmd.size,
-                                   attribs_cmd.va, attribs_cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
+                                 attribs_cmd.va, attribs_cmd.dma);
        return status;
 }
 
@@ -2972,9 +2974,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 
        memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
        get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
-       get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
-                                                  get_mac_list_cmd.size,
-                                                  &get_mac_list_cmd.dma);
+       get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                                 get_mac_list_cmd.size,
+                                                 &get_mac_list_cmd.dma,
+                                                 GFP_ATOMIC);
 
        if (!get_mac_list_cmd.va) {
                dev_err(&adapter->pdev->dev,
@@ -3047,8 +3050,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 
 out:
        spin_unlock_bh(&adapter->mcc_lock);
-       pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
-                           get_mac_list_cmd.va, get_mac_list_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
+                         get_mac_list_cmd.va, get_mac_list_cmd.dma);
        return status;
 }
 
@@ -3101,8 +3104,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_req_set_mac_list);
-       cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
-                                   &cmd.dma, GFP_KERNEL);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_KERNEL);
        if (!cmd.va)
                return -ENOMEM;
 
@@ -3291,7 +3294,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
@@ -3326,7 +3330,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 err:
        mutex_unlock(&adapter->mbox_lock);
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
 
 }
@@ -3340,8 +3345,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
 
        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-       extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
-                                            &extfat_cmd.dma);
+       extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           extfat_cmd.size, &extfat_cmd.dma,
+                                           GFP_ATOMIC);
        if (!extfat_cmd.va)
                return -ENOMEM;
 
@@ -3363,8 +3369,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
 
        status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
 err:
-       pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
-                           extfat_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+                         extfat_cmd.dma);
        return status;
 }
 
@@ -3377,8 +3383,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
 
        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-       extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
-                                            &extfat_cmd.dma);
+       extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           extfat_cmd.size, &extfat_cmd.dma,
+                                           GFP_ATOMIC);
 
        if (!extfat_cmd.va) {
                dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -3396,8 +3403,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
                                level = cfgs->module[0].trace_lvl[j].dbg_lvl;
                }
        }
-       pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
-                           extfat_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+                         extfat_cmd.dma);
 err:
        return level;
 }
@@ -3595,7 +3602,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_get_func_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
@@ -3635,7 +3643,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 err:
        mutex_unlock(&adapter->mbox_lock);
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
 }
 
@@ -3656,7 +3665,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va)
                return -ENOMEM;
 
@@ -3702,7 +3712,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
                res->vf_if_cap_flags = vf_res->cap_flags;
 err:
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
 }
 
@@ -3717,7 +3728,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_req_set_profile_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va)
                return -ENOMEM;
 
@@ -3733,7 +3745,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
        status = be_cmd_notify_wait(adapter, &wrb);
 
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
 }
 
index b765c24625bf523fd7932be17f6dfa22840a8e46..2835dee5dc3930cc5d1d09ec958bd2557228a2cd 100644 (file)
@@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
        int status = 0;
 
        read_cmd.size = LANCER_READ_FILE_CHUNK;
-       read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
-                                          &read_cmd.dma);
+       read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
+                                         &read_cmd.dma, GFP_ATOMIC);
 
        if (!read_cmd.va) {
                dev_err(&adapter->pdev->dev,
@@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
                        break;
                }
        }
-       pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
-                           read_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
+                         read_cmd.dma);
 
        return status;
 }
@@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
        };
 
        ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
-       ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
-                                          &ddrdma_cmd.dma, GFP_KERNEL);
+       ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           ddrdma_cmd.size, &ddrdma_cmd.dma,
+                                           GFP_KERNEL);
        if (!ddrdma_cmd.va)
                return -ENOMEM;
 
@@ -941,8 +942,9 @@ static int be_read_eeprom(struct net_device *netdev,
 
        memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
        eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
-       eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
-                                          &eeprom_cmd.dma, GFP_KERNEL);
+       eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           eeprom_cmd.size, &eeprom_cmd.dma,
+                                           GFP_KERNEL);
 
        if (!eeprom_cmd.va)
                return -ENOMEM;
index a6dcbf850c1fd4e09462d40f5f0e7cc08cfb2088..e43cc8a73ea7e85a927443c077c18ce6c673751a 100644 (file)
@@ -2358,11 +2358,11 @@ static int be_evt_queues_create(struct be_adapter *adapter)
                                    adapter->cfg_num_qs);
 
        for_all_evt_queues(adapter, eqo, i) {
+               int numa_node = dev_to_node(&adapter->pdev->dev);
                if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
                        return -ENOMEM;
-               cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
-                                           eqo->affinity_mask);
-
+               cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+                               eqo->affinity_mask);
                netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
                               BE_NAPI_WEIGHT);
                napi_hash_add(&eqo->napi);
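cpumask_set_cpu_local_first() is gone; cpumask_local_spread() returns the i-th CPU with preference for the given NUMA node and cannot fail, so the callers lose their error handling (the mlx4 hunks below make the same change). A sketch of the new idiom, names hypothetical:

	static void demo_affinity_hint(int i, int numa_node, struct cpumask *mask)
	{
		/* Always yields a valid CPU id; no return code to check. */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node), mask);
	}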
@@ -4605,8 +4605,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
 
        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
                                + LANCER_FW_DOWNLOAD_CHUNK;
-       flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
-                                         &flash_cmd.dma, GFP_KERNEL);
+       flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
+                                          &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va)
                return -ENOMEM;
 
@@ -4739,8 +4739,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
        }
 
        flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
-       flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
-                                         GFP_KERNEL);
+       flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+                                          GFP_KERNEL);
        if (!flash_cmd.va)
                return -ENOMEM;
 
@@ -5291,16 +5291,15 @@ static int be_drv_init(struct be_adapter *adapter)
        int status = 0;
 
        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-       mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
-                                               &mbox_mem_alloc->dma,
-                                               GFP_KERNEL);
+       mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
+                                                &mbox_mem_alloc->dma,
+                                                GFP_KERNEL);
        if (!mbox_mem_alloc->va)
                return -ENOMEM;
 
        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
-       memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
 
        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
        rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
index de79193221903edee02810fe657ac44815e877ca..b9df0cbd0a3833321d1f73bc74258b50b137f225 100644 (file)
@@ -2084,12 +2084,8 @@ static void emac_ethtool_get_pauseparam(struct net_device *ndev,
 
 static int emac_get_regs_len(struct emac_instance *dev)
 {
-       if (emac_has_feature(dev, EMAC_FTR_EMAC4))
-               return sizeof(struct emac_ethtool_regs_subhdr) +
-                       EMAC4_ETHTOOL_REGS_SIZE(dev);
-       else
                return sizeof(struct emac_ethtool_regs_subhdr) +
-                       EMAC_ETHTOOL_REGS_SIZE(dev);
+                       sizeof(struct emac_regs);
 }
 
 static int emac_ethtool_get_regs_len(struct net_device *ndev)
@@ -2114,15 +2110,15 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf)
        struct emac_ethtool_regs_subhdr *hdr = buf;
 
        hdr->index = dev->cell_index;
-       if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
+       if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
+               hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
+       } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
                hdr->version = EMAC4_ETHTOOL_REGS_VER;
-               memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
-               return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
        } else {
                hdr->version = EMAC_ETHTOOL_REGS_VER;
-               memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
-               return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
        }
+       memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
+       return (void *)(hdr + 1) + sizeof(struct emac_regs);
 }
 
 static void emac_ethtool_get_regs(struct net_device *ndev,
index 67f342a9f65e46fe8dd015b921fd144e30db286b..28df37420da963d5d8f3b3234e4f584442537121 100644 (file)
@@ -461,10 +461,7 @@ struct emac_ethtool_regs_subhdr {
 };
 
 #define EMAC_ETHTOOL_REGS_VER          0
-#define EMAC_ETHTOOL_REGS_SIZE(dev)    ((dev)->rsrc_regs.end - \
-                                        (dev)->rsrc_regs.start + 1)
-#define EMAC4_ETHTOOL_REGS_VER         1
-#define EMAC4_ETHTOOL_REGS_SIZE(dev)   ((dev)->rsrc_regs.end - \
-                                        (dev)->rsrc_regs.start + 1)
+#define EMAC4_ETHTOOL_REGS_VER         1
+#define EMAC4SYNC_ETHTOOL_REGS_VER     2
 
 #endif /* __IBM_NEWEMAC_CORE_H */
index 33c35d3b7420fa9ae545aea4ebd5160036914718..5d47307121abbe413cd259ff74f9aa2ee68e6c45 100644 (file)
@@ -317,6 +317,7 @@ struct i40e_pf {
 #endif
 #define I40E_FLAG_PORT_ID_VALID                (u64)(1 << 28)
 #define I40E_FLAG_DCB_CAPABLE                  (u64)(1 << 29)
+#define I40E_FLAG_VEB_MODE_ENABLED             BIT_ULL(40)
 
        /* tracks features that get auto disabled by errors */
        u64 auto_disable_flags;
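Note the switch to BIT_ULL() for the new flag: pf->flags is a u64, and a plain (1 << 40) would overflow a 32-bit int before any cast takes effect. Illustrative only:

	#define DEMO_FLAG_LOW	(u64)(1 << 29)	/* fine: the bit fits in an int */
	#define DEMO_FLAG_HIGH	BIT_ULL(40)	/* required for bits above 31 */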
index 34170eabca7da939ba1c8b9b5fad14dc2f54370d..da0faf478af076199e4281b0f3da57ad92c5e62b 100644 (file)
@@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        goto command_write_done;
                }
 
+               /* By default we are in VEPA mode; if this is the first VF/VMDq
+                * VSI to be added, switch to VEB mode.
+                */
+               if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+                       pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+                       i40e_do_reset_safe(pf,
+                                          BIT_ULL(__I40E_PF_RESET_REQUESTED));
+               }
+
                vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
                if (vsi)
                        dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
index a54c14491e3b6a4dbc168980dd44d399b6766487..5b5bea159bd53c8684d0a69b310e492bc797c8b6 100644 (file)
@@ -6097,6 +6097,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
        if (ret)
                goto end_reconstitute;
 
+       if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+               veb->bridge_mode = BRIDGE_MODE_VEB;
+       else
+               veb->bridge_mode = BRIDGE_MODE_VEPA;
        i40e_config_bridge_mode(veb);
 
        /* create the remaining VSIs attached to this VEB */
@@ -8031,7 +8035,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
                } else if (mode != veb->bridge_mode) {
                        /* Existing HW bridge but different mode needs reset */
                        veb->bridge_mode = mode;
-                       i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+                       /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
+                       if (mode == BRIDGE_MODE_VEB)
+                               pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+                       else
+                               pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+                       i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
                        break;
                }
        }
@@ -8343,11 +8352,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                ctxt.uplink_seid = vsi->uplink_seid;
                ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
                ctxt.flags = I40E_AQ_VSI_TYPE_PF;
-               if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+               if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
+                   (i40e_is_vsi_uplink_mode_veb(vsi))) {
                        ctxt.info.valid_sections |=
-                               cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+                            cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
                        ctxt.info.switch_id =
-                               cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+                          cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
                }
                i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
                break;
@@ -8746,6 +8756,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                                         __func__);
                                return NULL;
                        }
+                       /* We come up in VEPA mode by default unless SRIOV is
+                        * already enabled, in which case we can't force VEPA
+                        * mode.
+                        */
+                       if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+                               veb->bridge_mode = BRIDGE_MODE_VEPA;
+                               pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+                       }
                        i40e_config_bridge_mode(veb);
                }
                for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
@@ -9856,6 +9874,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_switch_setup;
        }
 
+#ifdef CONFIG_PCI_IOV
+       /* prep for VF support */
+       if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+           (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+           !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+               if (pci_num_vf(pdev))
+                       pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+       }
+#endif
        err = i40e_setup_pf_switch(pf, false);
        if (err) {
                dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
index 4bd3a80aba82998bba343a1870b2d21f59bca4e0..9d95042d5a0f5805824d53ecc847ff76a9909444 100644 (file)
@@ -2410,14 +2410,12 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * i40e_chk_linearize - Check if there are more than 8 fragments per packet
  * @skb:      send buffer
  * @tx_flags: collected send information
- * @hdr_len:  size of the packet header
  *
  * Note: Our HW can't scatter-gather more than 8 fragments to build
  * a packet on the wire and so we need to figure out the cases where we
  * need to linearize the skb.
  **/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
-                              const u8 hdr_len)
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
 {
        struct skb_frag_struct *frag;
        bool linearize = false;
@@ -2429,7 +2427,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
        gso_segs = skb_shinfo(skb)->gso_segs;
 
        if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
-               u16 j = 1;
+               u16 j = 0;
 
                if (num_frags < (I40E_MAX_BUFFER_TXD))
                        goto linearize_chk_done;
@@ -2440,21 +2438,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
                        goto linearize_chk_done;
                }
                frag = &skb_shinfo(skb)->frags[0];
-               size = hdr_len;
                /* we might still have more fragments per segment */
                do {
                        size += skb_frag_size(frag);
                        frag++; j++;
+                       if ((size >= skb_shinfo(skb)->gso_size) &&
+                           (j < I40E_MAX_BUFFER_TXD)) {
+                               size = (size % skb_shinfo(skb)->gso_size);
+                               j = (size) ? 1 : 0;
+                       }
                        if (j == I40E_MAX_BUFFER_TXD) {
-                               if (size < skb_shinfo(skb)->gso_size) {
-                                       linearize = true;
-                                       break;
-                               }
-                               j = 1;
-                               size -= skb_shinfo(skb)->gso_size;
-                               if (size)
-                                       j++;
-                               size += hdr_len;
+                               linearize = true;
+                               break;
                        }
                        num_frags--;
                } while (num_frags);
@@ -2724,7 +2719,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        if (tsyn)
                tx_flags |= I40E_TX_FLAGS_TSYN;
 
-       if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+       if (i40e_chk_linearize(skb, tx_flags))
                if (skb_linearize(skb))
                        goto out_drop;
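The rewritten check counts payload bytes only (hdr_len is no longer mixed in) and restarts the descriptor count at every gso_size boundary, carrying any leftover bytes into the next segment; the i40evf copy below gets the identical change. A rough restatement of the rule with hypothetical names:

	/* Sketch: within one TSO segment of gso_size bytes, at most max_txd
	 * data descriptors may be consumed; needing more means the skb must
	 * be linearized before transmit.
	 */
	static bool demo_needs_linearize(const unsigned int *frag_size, int nfrags,
					 unsigned int gso_size, int max_txd)
	{
		unsigned int size = 0;
		int used = 0;

		while (nfrags--) {
			size += *frag_size++;
			used++;
			if (size >= gso_size && used < max_txd) {
				size %= gso_size;	/* leftover starts next segment */
				used = size ? 1 : 0;
			}
			if (used == max_txd)
				return true;
		}
		return false;
	}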
 
index 78d1c4ff565e8853473b70c3827e6a727ff3ce1c..4e9376da051829969de7750c2dc7a66acc5e5f40 100644 (file)
@@ -1018,11 +1018,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
 
-       if (num_vfs)
+       if (num_vfs) {
+               if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+                       pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+                       i40e_do_reset_safe(pf,
+                                          BIT_ULL(__I40E_PF_RESET_REQUESTED));
+               }
                return i40e_pci_sriov_enable(pdev, num_vfs);
+       }
 
        if (!pci_vfs_assigned(pf->pdev)) {
                i40e_free_vfs(pf);
+               pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+               i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
        } else {
                dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
                return -EINVAL;
index b077e02a0cc7ac8f67ad90560cf990f8f7a66277..458fbb421090772d0bbc1620277624339e0cd757 100644 (file)
@@ -1619,14 +1619,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
  * i40e_chk_linearize - Check if there are more than 8 fragments per packet
  * @skb:      send buffer
  * @tx_flags: collected send information
- * @hdr_len:  size of the packet header
  *
  * Note: Our HW can't scatter-gather more than 8 fragments to build
  * a packet on the wire and so we need to figure out the cases where we
  * need to linearize the skb.
  **/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
-                              const u8 hdr_len)
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
 {
        struct skb_frag_struct *frag;
        bool linearize = false;
@@ -1638,7 +1636,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
        gso_segs = skb_shinfo(skb)->gso_segs;
 
        if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
-               u16 j = 1;
+               u16 j = 0;
 
                if (num_frags < (I40E_MAX_BUFFER_TXD))
                        goto linearize_chk_done;
@@ -1649,21 +1647,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
                        goto linearize_chk_done;
                }
                frag = &skb_shinfo(skb)->frags[0];
-               size = hdr_len;
                /* we might still have more fragments per segment */
                do {
                        size += skb_frag_size(frag);
                        frag++; j++;
+                       if ((size >= skb_shinfo(skb)->gso_size) &&
+                           (j < I40E_MAX_BUFFER_TXD)) {
+                               size = (size % skb_shinfo(skb)->gso_size);
+                               j = (size) ? 1 : 0;
+                       }
                        if (j == I40E_MAX_BUFFER_TXD) {
-                               if (size < skb_shinfo(skb)->gso_size) {
-                                       linearize = true;
-                                       break;
-                               }
-                               j = 1;
-                               size -= skb_shinfo(skb)->gso_size;
-                               if (size)
-                                       j++;
-                               size += hdr_len;
+                               linearize = true;
+                               break;
                        }
                        num_frags--;
                } while (num_frags);
@@ -1950,7 +1945,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;
 
-       if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+       if (i40e_chk_linearize(skb, tx_flags))
                if (skb_linearize(skb))
                        goto out_drop;
 
index e3b9b63ad01083cb987429f57c9ebef84d86f4db..c3a9392cbc192229f4178c913fad8ab64d8c44c3 100644 (file)
@@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
                        igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
                        igb->perout[i].period.tv_sec = ts.tv_sec;
                        igb->perout[i].period.tv_nsec = ts.tv_nsec;
-                       wr32(trgttiml, rq->perout.start.sec);
-                       wr32(trgttimh, rq->perout.start.nsec);
+                       wr32(trgttimh, rq->perout.start.sec);
+                       wr32(trgttiml, rq->perout.start.nsec);
                        tsauxc |= tsauxc_mask;
                        tsim |= tsim_mask;
                } else {
index 4f7dc044601e2751ad625e4c011aa3a1c328e62f..529ef0594b902ebaf2838cf478ef914a0b69d5b7 100644 (file)
@@ -714,8 +714,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                                         msecs_to_jiffies(timeout))) {
                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
                          op);
-               err = -EIO;
-               goto out_reset;
+               if (op == MLX4_CMD_NOP) {
+                       err = -EBUSY;
+                       goto out;
+               } else {
+                       err = -EIO;
+                       goto out_reset;
+               }
        }
 
        err = context->result;
index 32f5ec7374723d1315f4234f77b12ffbe5adcfe0..cf467a9f6cc78c0c8a53b9120cec2795888f4904 100644 (file)
@@ -1501,17 +1501,13 @@ static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
 {
        struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
        int numa_node = priv->mdev->dev->numa_node;
-       int ret = 0;
 
        if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
                return -ENOMEM;
 
-       ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
-                                         ring->affinity_mask);
-       if (ret)
-               free_cpumask_var(ring->affinity_mask);
-
-       return ret;
+       cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
+                       ring->affinity_mask);
+       return 0;
 }
 
 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
index f7bf312fb44311b1c436c4eb3706341db92c1db0..7bed3a88579fa9db92d7e42ad7d43265bd8a3d41 100644 (file)
@@ -144,9 +144,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
        ring->queue_index = queue_index;
 
        if (queue_index < priv->num_tx_rings_p_up)
-               cpumask_set_cpu_local_first(queue_index,
-                                           priv->mdev->dev->numa_node,
-                                           &ring->affinity_mask);
+               cpumask_set_cpu(cpumask_local_spread(queue_index,
+                                                    priv->mdev->dev->numa_node),
+                               &ring->affinity_mask);
 
        *pring = ring;
        return 0;
index e0c31e3947d1091371bfa742fbea5cee9743002d..6409a06bbdf633b0ce440bf817aabfe69311dd1e 100644 (file)
@@ -3025,9 +3025,9 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
        u8 dw, rows, cols, banks, ranks;
        u32 val;
 
-       if (size != sizeof(struct netxen_dimm_cfg)) {
+       if (size < attr->size) {
                netdev_err(netdev, "Invalid size\n");
-               return -1;
+               return -EINVAL;
        }
 
        memset(&dimm, 0, sizeof(struct netxen_dimm_cfg));
@@ -3137,7 +3137,7 @@ out:
 
 static struct bin_attribute bin_attr_dimm = {
        .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) },
-       .size = 0,
+       .size = sizeof(struct netxen_dimm_cfg),
        .read = netxen_sysfs_read_dimm,
 };
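Declaring the real .size on the bin_attribute lets the read handler validate the caller's buffer, and -EINVAL replaces the old bare -1 (which userspace would see as -EPERM). The idiom, in isolation:

	if (size < attr->size)	/* caller's buffer can't hold a full record */
		return -EINVAL;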
 
index c0ad95d2f63d9a12cd300aa0420ddda661ccaed1..809ea4610a77e774af0413d896e8ec802946d8fa 100644 (file)
@@ -224,12 +224,17 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
        }
 }
 
-static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+                               struct efx_rx_buffer *rx_buf,
+                               unsigned int num_bufs)
 {
-       if (rx_buf->page) {
-               put_page(rx_buf->page);
-               rx_buf->page = NULL;
-       }
+       do {
+               if (rx_buf->page) {
+                       put_page(rx_buf->page);
+                       rx_buf->page = NULL;
+               }
+               rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+       } while (--num_bufs);
 }
 
 /* Attempt to recycle the page if there is an RX recycle ring; the page can
@@ -278,7 +283,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
        /* If this is the last buffer in a page, unmap and free it. */
        if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
                efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
-               efx_free_rx_buffer(rx_buf);
+               efx_free_rx_buffers(rx_queue, rx_buf, 1);
        }
        rx_buf->page = NULL;
 }
@@ -304,10 +309,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
 
        efx_recycle_rx_pages(channel, rx_buf, n_frags);
 
-       do {
-               efx_free_rx_buffer(rx_buf);
-               rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
-       } while (--n_frags);
+       efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
 }
 
 /**
@@ -431,11 +433,10 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
 
        skb = napi_get_frags(napi);
        if (unlikely(!skb)) {
-               while (n_frags--) {
-                       put_page(rx_buf->page);
-                       rx_buf->page = NULL;
-                       rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
-               }
+               struct efx_rx_queue *rx_queue;
+
+               rx_queue = efx_channel_get_rx_queue(channel);
+               efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }
 
@@ -622,7 +623,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
 
        skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
        if (unlikely(skb == NULL)) {
-               efx_free_rx_buffer(rx_buf);
+               struct efx_rx_queue *rx_queue;
+
+               rx_queue = efx_channel_get_rx_queue(channel);
+               efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }
        skb_record_rx_queue(skb, channel->rx_queue.core_index);
@@ -661,8 +665,12 @@ void __efx_rx_packet(struct efx_channel *channel)
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
+               struct efx_rx_queue *rx_queue;
+
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
-               efx_free_rx_buffer(rx_buf);
+               rx_queue = efx_channel_get_rx_queue(channel);
+               efx_free_rx_buffers(rx_queue, rx_buf,
+                                   channel->rx_pkt_n_frags);
                goto out;
        }
 
index 2ac9552d1fa385953e261ff3797c74b8d5ad4add..73bab983edd96a47169bf4b1957e5fd13c28a3a0 100644 (file)
@@ -117,6 +117,12 @@ struct stmmac_priv {
        int use_riwt;
        int irq_wake;
        spinlock_t ptp_lock;
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *dbgfs_dir;
+       struct dentry *dbgfs_rings_status;
+       struct dentry *dbgfs_dma_cap;
+#endif
 };
 
 int stmmac_mdio_unregister(struct net_device *ndev);
index 05c146f718a36551c4fe4ada4871f2612f16571d..2c5ce2baca8712790d51096a53868b84466f7dde 100644 (file)
@@ -118,7 +118,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 
 #ifdef CONFIG_DEBUG_FS
 static int stmmac_init_fs(struct net_device *dev);
-static void stmmac_exit_fs(void);
+static void stmmac_exit_fs(struct net_device *dev);
 #endif
 
 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
@@ -1916,7 +1916,7 @@ static int stmmac_release(struct net_device *dev)
        netif_carrier_off(dev);
 
 #ifdef CONFIG_DEBUG_FS
-       stmmac_exit_fs();
+       stmmac_exit_fs(dev);
 #endif
 
        stmmac_release_ptp(priv);
@@ -2508,8 +2508,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
 #ifdef CONFIG_DEBUG_FS
 static struct dentry *stmmac_fs_dir;
-static struct dentry *stmmac_rings_status;
-static struct dentry *stmmac_dma_cap;
 
 static void sysfs_display_ring(void *head, int size, int extend_desc,
                               struct seq_file *seq)
@@ -2648,36 +2646,39 @@ static const struct file_operations stmmac_dma_cap_fops = {
 
 static int stmmac_init_fs(struct net_device *dev)
 {
-       /* Create debugfs entries */
-       stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       /* Create per netdev entries */
+       priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
 
-       if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
-               pr_err("ERROR %s, debugfs create directory failed\n",
-                      STMMAC_RESOURCE_NAME);
+       if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
+               pr_err("ERROR %s/%s, debugfs create directory failed\n",
+                      STMMAC_RESOURCE_NAME, dev->name);
 
                return -ENOMEM;
        }
 
        /* Entry to report DMA RX/TX rings */
-       stmmac_rings_status = debugfs_create_file("descriptors_status",
-                                                 S_IRUGO, stmmac_fs_dir, dev,
-                                                 &stmmac_rings_status_fops);
+       priv->dbgfs_rings_status =
+               debugfs_create_file("descriptors_status", S_IRUGO,
+                                   priv->dbgfs_dir, dev,
+                                   &stmmac_rings_status_fops);
 
-       if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
+       if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
                pr_info("ERROR creating stmmac ring debugfs file\n");
-               debugfs_remove(stmmac_fs_dir);
+               debugfs_remove_recursive(priv->dbgfs_dir);
 
                return -ENOMEM;
        }
 
        /* Entry to report the DMA HW features */
-       stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
-                                            dev, &stmmac_dma_cap_fops);
+       priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
+                                           priv->dbgfs_dir,
+                                           dev, &stmmac_dma_cap_fops);
 
-       if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
+       if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
                pr_info("ERROR creating stmmac MMC debugfs file\n");
-               debugfs_remove(stmmac_rings_status);
-               debugfs_remove(stmmac_fs_dir);
+               debugfs_remove_recursive(priv->dbgfs_dir);
 
                return -ENOMEM;
        }
@@ -2685,11 +2686,11 @@ static int stmmac_init_fs(struct net_device *dev)
        return 0;
 }
 
-static void stmmac_exit_fs(void)
+static void stmmac_exit_fs(struct net_device *dev)
 {
-       debugfs_remove(stmmac_rings_status);
-       debugfs_remove(stmmac_dma_cap);
-       debugfs_remove(stmmac_fs_dir);
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       debugfs_remove_recursive(priv->dbgfs_dir);
 }
 #endif /* CONFIG_DEBUG_FS */
 
@@ -3149,6 +3150,35 @@ err:
 __setup("stmmaceth=", stmmac_cmdline_opt);
 #endif /* MODULE */
 
+static int __init stmmac_init(void)
+{
+#ifdef CONFIG_DEBUG_FS
+       /* Create debugfs main directory if it doesn't exist yet */
+       if (!stmmac_fs_dir) {
+               stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+
+               if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
+                       pr_err("ERROR %s, debugfs create directory failed\n",
+                              STMMAC_RESOURCE_NAME);
+
+                       return -ENOMEM;
+               }
+       }
+#endif
+
+       return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+#ifdef CONFIG_DEBUG_FS
+       debugfs_remove_recursive(stmmac_fs_dir);
+#endif
+}
+
+module_init(stmmac_init)
+module_exit(stmmac_exit)
+
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
 MODULE_LICENSE("GPL");
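Moving the dentries into stmmac_priv gives each netdev its own subdirectory under a single module-wide root, and teardown collapses to one debugfs_remove_recursive() per scope. The resulting layout looks like this (interface name illustrative):

	/sys/kernel/debug/stmmaceth/			<- created once in stmmac_init()
	/sys/kernel/debug/stmmaceth/eth0/		<- per netdev, stmmac_init_fs()
	/sys/kernel/debug/stmmaceth/eth0/descriptors_status
	/sys/kernel/debug/stmmaceth/eth0/dma_cap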
index fb276f64cd6400cc7617c2586582c378eb2e9c53..34a75cba3b739ce5b4f28e1549915e19502fb4cc 100644 (file)
@@ -755,6 +755,45 @@ static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
        return ret;
 }
 
+static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev)
+{
+       if (phydev->autoneg == AUTONEG_ENABLE) {
+               if (phydev->advertising & ADVERTISED_10000baseKR_Full)
+                       return true;
+       } else {
+               if (phydev->speed == SPEED_10000)
+                       return true;
+       }
+
+       return false;
+}
+
+static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev)
+{
+       if (phydev->autoneg == AUTONEG_ENABLE) {
+               if (phydev->advertising & ADVERTISED_2500baseX_Full)
+                       return true;
+       } else {
+               if (phydev->speed == SPEED_2500)
+                       return true;
+       }
+
+       return false;
+}
+
+static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev)
+{
+       if (phydev->autoneg == AUTONEG_ENABLE) {
+               if (phydev->advertising & ADVERTISED_1000baseKX_Full)
+                       return true;
+       } else {
+               if (phydev->speed == SPEED_1000)
+                       return true;
+       }
+
+       return false;
+}
+
 static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
                               bool restart)
 {
@@ -1235,11 +1274,11 @@ static int amd_xgbe_phy_config_init(struct phy_device *phydev)
        /* Set initial mode - call the mode setting routines
         * directly to insure we are properly configured
         */
-       if (phydev->advertising & SUPPORTED_10000baseKR_Full)
+       if (amd_xgbe_phy_use_xgmii_mode(phydev))
                ret = amd_xgbe_phy_xgmii_mode(phydev);
-       else if (phydev->advertising & SUPPORTED_1000baseKX_Full)
+       else if (amd_xgbe_phy_use_gmii_mode(phydev))
                ret = amd_xgbe_phy_gmii_mode(phydev);
-       else if (phydev->advertising & SUPPORTED_2500baseX_Full)
+       else if (amd_xgbe_phy_use_gmii_2500_mode(phydev))
                ret = amd_xgbe_phy_gmii_2500_mode(phydev);
        else
                ret = -EINVAL;
index 64c74c6a482806bfc5d2bb4f821b4b1ef085adfd..b5dc59de094eef06838d4601cacd9dbeaba04a6a 100644 (file)
@@ -404,7 +404,7 @@ static struct phy_driver bcm7xxx_driver[] = {
        .name           = "Broadcom BCM7425",
        .features       = PHY_GBIT_FEATURES |
                          SUPPORTED_Pause | SUPPORTED_Asym_Pause,
-       .flags          = 0,
+       .flags          = PHY_IS_INTERNAL,
        .config_init    = bcm7xxx_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
index 496e02f961d37039ff56d5e45a8aa28aa0f44b91..00cb41e713123689803e5dddfa527c3ebaee26ae 100644 (file)
@@ -47,7 +47,7 @@
 #define PSF_TX         0x1000
 #define EXT_EVENT      1
 #define CAL_EVENT      7
-#define CAL_TRIGGER    7
+#define CAL_TRIGGER    1
 #define DP83640_N_PINS 12
 
 #define MII_DP83640_MICR 0x11
@@ -496,7 +496,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
                        else
                                evnt |= EVNT_RISE;
                }
+               mutex_lock(&clock->extreg_lock);
                ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
+               mutex_unlock(&clock->extreg_lock);
                return 0;
 
        case PTP_CLK_REQ_PEROUT:
@@ -532,6 +534,8 @@ static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
 
 static void enable_status_frames(struct phy_device *phydev, bool on)
 {
+       struct dp83640_private *dp83640 = phydev->priv;
+       struct dp83640_clock *clock = dp83640->clock;
        u16 cfg0 = 0, ver;
 
        if (on)
@@ -539,9 +543,13 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
 
        ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
 
+       mutex_lock(&clock->extreg_lock);
+
        ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
        ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
 
+       mutex_unlock(&clock->extreg_lock);
+
        if (!phydev->attached_dev) {
                pr_warn("expected to find an attached netdevice\n");
                return;
@@ -838,7 +846,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
        list_del_init(&rxts->list);
        phy2rxts(phy_rxts, rxts);
 
-       spin_lock_irqsave(&dp83640->rx_queue.lock, flags);
+       spin_lock(&dp83640->rx_queue.lock);
        skb_queue_walk(&dp83640->rx_queue, skb) {
                struct dp83640_skb_info *skb_info;
 
@@ -853,7 +861,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
                        break;
                }
        }
-       spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags);
+       spin_unlock(&dp83640->rx_queue.lock);
 
        if (!shhwtstamps)
                list_add_tail(&rxts->list, &dp83640->rxts);
@@ -1173,11 +1181,18 @@ static int dp83640_config_init(struct phy_device *phydev)
 
        if (clock->chosen && !list_empty(&clock->phylist))
                recalibrate(clock);
-       else
+       else {
+               mutex_lock(&clock->extreg_lock);
                enable_broadcast(phydev, clock->page, 1);
+               mutex_unlock(&clock->extreg_lock);
+       }
 
        enable_status_frames(phydev, true);
+
+       mutex_lock(&clock->extreg_lock);
        ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
+       mutex_unlock(&clock->extreg_lock);
+
        return 0;
 }
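
The dp83640 hunks above take clock->extreg_lock around every ext_write() because the extended registers sit behind a shared page-select register: selecting the page and touching the register must be atomic against concurrent callers, or a write lands in the wrong bank. A runnable userspace model of why the lock matters (a pthread mutex and a fake register file stand in for the kernel pieces):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t extreg_lock = PTHREAD_MUTEX_INITIALIZER;
static int current_page;
static int regs[8][32];		/* fake register file: [page][reg] */

static void ext_write(int page, int reg, int val)
{
	pthread_mutex_lock(&extreg_lock);
	current_page = page;		/* step 1: select the page */
	regs[current_page][reg] = val;	/* step 2: access the register */
	pthread_mutex_unlock(&extreg_lock);
}

static void *writer(void *arg)
{
	int page = (int)(long)arg;

	for (int i = 0; i < 10000; i++)
		ext_write(page, 0, page);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, writer, (void *)1L);
	pthread_create(&b, NULL, writer, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* with the lock held across both steps, each value stays in
	 * its own page; without it, writes can cross banks */
	printf("page1[0]=%d page2[0]=%d\n", regs[1][0], regs[2][0]);
	return 0;
}
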
 
index 4ec9811f49c87744458ed16cdcec32422432dc3f..65efb146898844510aa489502ee2c9db23906c92 100644 (file)
@@ -511,11 +511,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
                                     msgbuf->rx_pktids,
                                     msgbuf->ioctl_resp_pktid);
        if (msgbuf->ioctl_resp_ret_len != 0) {
-               if (!skb) {
-                       brcmf_err("Invalid packet id idx recv'd %d\n",
-                                 msgbuf->ioctl_resp_pktid);
+               if (!skb)
                        return -EBADF;
-               }
+
                memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
                                       len : msgbuf->ioctl_resp_ret_len);
        }
@@ -874,10 +872,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
        flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
                                     msgbuf->tx_pktids, idx);
-       if (!skb) {
-               brcmf_err("Invalid packet id idx recv'd %d\n", idx);
+       if (!skb)
                return;
-       }
 
        set_bit(flowid, msgbuf->txstatus_done_map);
        commonring = msgbuf->flowrings[flowid];
@@ -1156,6 +1152,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
 
        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
                                     msgbuf->rx_pktids, idx);
+       if (!skb)
+               return;
 
        if (data_offset)
                skb_pull(skb, data_offset);
index ab019b45551b9ea9bef61a1861feba7601897a5f..f89f446e5c8ae32b5dccc42cae6234ad75283ff8 100644 (file)
@@ -21,6 +21,7 @@ config IWLWIFI
                Intel 7260 Wi-Fi Adapter
                Intel 3160 Wi-Fi Adapter
                Intel 7265 Wi-Fi Adapter
+               Intel 3165 Wi-Fi Adapter
 
 
          This driver uses the kernel's mac80211 subsystem.
index 36e786f0387bd42593fe3c8ec523831694483bea..74ad278116be3feb18b3a2a98e4034aa3145a6a1 100644 (file)
 
 /* Highest firmware API version supported */
 #define IWL7260_UCODE_API_MAX  13
-#define IWL3160_UCODE_API_MAX  13
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   12
-#define IWL3160_UCODE_API_OK   12
+#define IWL3165_UCODE_API_OK   13
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  10
-#define IWL3160_UCODE_API_MIN  10
+#define IWL3165_UCODE_API_MIN  13
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
 #define IWL3160_FW_PRE "iwlwifi-3160-"
 #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
 
-#define IWL3165_FW_PRE "iwlwifi-3165-"
-#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode"
-
 #define IWL7265_FW_PRE "iwlwifi-7265-"
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
@@ -248,8 +244,13 @@ static const struct iwl_ht_params iwl7265_ht_params = {
 
 const struct iwl_cfg iwl3165_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 3165",
-       .fw_name_pre = IWL3165_FW_PRE,
+       .fw_name_pre = IWL7265D_FW_PRE,
        IWL_DEVICE_7000,
+       /* sparse doesn't like the re-assignment but it is safe */
+#ifndef __CHECKER__
+       .ucode_api_ok = IWL3165_UCODE_API_OK,
+       .ucode_api_min = IWL3165_UCODE_API_MIN,
+#endif
        .ht_params = &iwl7000_ht_params,
        .nvm_ver = IWL3165_NVM_VERSION,
        .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
@@ -325,6 +326,5 @@ const struct iwl_cfg iwl7265d_n_cfg = {
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
index 41ff85de73343b0a5686bfd175164807e8dc4684..21302b6f2bfd79a8e8617a345e3771f6608c0145 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -748,6 +750,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
                return;
        }
 
+       if (data->sku_cap_mimo_disabled)
+               rx_chains = 1;
+
        ht_info->ht_supported = true;
        ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
 
index 5234a0bf11e4e3286b740c22518f4a039e224e94..750c8c9ee70d0352e5828049ff4b138e31a3ae6c 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -84,6 +86,7 @@ struct iwl_nvm_data {
        bool sku_cap_11ac_enable;
        bool sku_cap_amt_enable;
        bool sku_cap_ipan_enable;
+       bool sku_cap_mimo_disabled;
 
        u16 radio_cfg_type;
        u8 radio_cfg_step;
index 83903a5025c2e69779554e7bcf980aff48b3d080..8e604a3931ca6db6a1ab0eff59d2787d8562e494 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -116,10 +116,11 @@ enum family_8000_nvm_offsets {
 
 /* SKU Capabilities (actual values from NVM definition) */
 enum nvm_sku_bits {
-       NVM_SKU_CAP_BAND_24GHZ  = BIT(0),
-       NVM_SKU_CAP_BAND_52GHZ  = BIT(1),
-       NVM_SKU_CAP_11N_ENABLE  = BIT(2),
-       NVM_SKU_CAP_11AC_ENABLE = BIT(3),
+       NVM_SKU_CAP_BAND_24GHZ          = BIT(0),
+       NVM_SKU_CAP_BAND_52GHZ          = BIT(1),
+       NVM_SKU_CAP_11N_ENABLE          = BIT(2),
+       NVM_SKU_CAP_11AC_ENABLE         = BIT(3),
+       NVM_SKU_CAP_MIMO_DISABLE        = BIT(5),
 };
 
 /*
@@ -368,6 +369,11 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
        if (cfg->ht_params->ldpc)
                vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
 
+       if (data->sku_cap_mimo_disabled) {
+               num_rx_ants = 1;
+               num_tx_ants = 1;
+       }
+
        if (num_tx_ants > 1)
                vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
        else
@@ -465,7 +471,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                return le16_to_cpup(nvm_sw + RADIO_CFG);
 
-       return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
+       return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000));
 
 }
 
@@ -527,6 +533,10 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
        const u8 *hw_addr;
 
        if (mac_override) {
+               static const u8 reserved_mac[] = {
+                       0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
+               };
+
                hw_addr = (const u8 *)(mac_override +
                                 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
 
@@ -538,7 +548,12 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
 
-               if (is_valid_ether_addr(data->hw_addr))
+               /*
+                * Force the use of the OTP MAC address in case of reserved MAC
+                * address in the NVM, or if address is given but invalid.
+                */
+               if (is_valid_ether_addr(data->hw_addr) &&
+                   memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
                        return;
 
                IWL_ERR_DEV(dev,
@@ -610,6 +625,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                data->sku_cap_11n_enable = false;
        data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
                                    (sku & NVM_SKU_CAP_11AC_ENABLE);
+       data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
 
        data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
 
index d954591e0be58528d138f8738b2cb2325db1fed3..6ac6de2af9779982231d1efb4c6186fad4442f5d 100644 (file)
@@ -776,7 +776,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
        struct iwl_host_cmd cmd = {
                .id = BT_CONFIG,
                .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+               .dataflags = { IWL_HCMD_DFL_DUP, },
                .flags = CMD_ASYNC,
        };
        struct iwl_mvm_sta *mvmsta;
index 1b1b2bf26819be1d09903f3c52957d274ee50ed7..4310cf102d78ecd4f3e7baffa13570d878153cb4 100644 (file)
@@ -1750,8 +1750,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
        int i, j, n_matches, ret;
 
        fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
-       if (!IS_ERR_OR_NULL(fw_status))
+       if (!IS_ERR_OR_NULL(fw_status)) {
                reasons = le32_to_cpu(fw_status->wakeup_reasons);
+               kfree(fw_status);
+       }
 
        if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
                wakeup.rfkill_release = true;
@@ -1868,15 +1870,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        /* get the BSS vif pointer again */
        vif = iwl_mvm_get_bss_vif(mvm);
        if (IS_ERR_OR_NULL(vif))
-               goto out_unlock;
+               goto err;
 
        ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
        if (ret)
-               goto out_unlock;
+               goto err;
 
        if (d3_status != IWL_D3_STATUS_ALIVE) {
                IWL_INFO(mvm, "Device was reset during suspend\n");
-               goto out_unlock;
+               goto err;
        }
 
        /* query SRAM first in case we want event logging */
@@ -1902,7 +1904,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
                goto out_iterate;
        }
 
- out_unlock:
+err:
+       iwl_mvm_free_nd(mvm);
        mutex_unlock(&mvm->mutex);
 
 out_iterate:
@@ -1915,6 +1918,14 @@ out:
        /* return 1 to reconfigure the device */
        set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
        set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
+
+       /* We always return 1, which causes mac80211 to do a reconfig
+        * with IEEE80211_RECONFIG_TYPE_RESTART.  This type of
+        * reconfig calls iwl_mvm_restart_complete(), where we unref
+        * the IWL_MVM_REF_UCODE_DOWN, so we need to take the
+        * reference here.
+        */
+       iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
        return 1;
 }
 
@@ -2021,7 +2032,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
        __iwl_mvm_resume(mvm, true);
        rtnl_unlock();
        iwl_abort_notification_waits(&mvm->notif_wait);
-       iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
        ieee80211_restart_hw(mvm->hw);
 
        /* wait for restart and disconnect all interfaces */
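
The new comment above describes a reference hand-off across an asynchronous restart: __iwl_mvm_resume() takes IWL_MVM_REF_UCODE_DOWN because mac80211's restart path is known to drop exactly one such reference, which is also why the old unconditional unref in d3_test_release() is removed. A toy model of that balance (plain counters stand in for iwl_mvm_ref/iwl_mvm_unref):

#include <stdio.h>

static int ucode_down_refs;

static void mvm_ref(void)   { ucode_down_refs++; }
static void mvm_unref(void) { ucode_down_refs--; }

static void restart_complete(void)
{
	mvm_unref();	/* the restart path always drops one reference */
}

static int resume(void)
{
	mvm_ref();	/* balances the unref in restart_complete() */
	return 1;	/* ask mac80211 to reconfigure/restart */
}

int main(void)
{
	resume();
	restart_complete();
	printf("refs after resume+restart: %d\n", ucode_down_refs); /* 0 */
	return 0;
}
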
index 40265b9c66aeceedb6da22c05738628f7c289705..dda9f7b5f3423173e668f507719e47c3540b27d0 100644 (file)
@@ -3995,9 +3995,6 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
        if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
                return;
 
-       if (event->u.mlme.status == MLME_SUCCESS)
-               return;
-
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
        trig_mlme = (void *)trig->data;
        if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
index 1c66297d82c0a80dd0bb66c7148ea8ef42c5e099..2ea01238754eb8d1c2470156f0293a2e15988fd6 100644 (file)
@@ -1263,11 +1263,13 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
                ieee80211_iterate_active_interfaces(
                        mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                        iwl_mvm_d0i3_disconnect_iter, mvm);
-
-       iwl_free_resp(&get_status_cmd);
 out:
        iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
 
+       /* qos_seq might point inside resp_pkt, so free it only now */
+       if (get_status_cmd.resp_pkt)
+               iwl_free_resp(&get_status_cmd);
+
        /* the FW might have updated the regdomain */
        iwl_mvm_update_changed_regdom(mvm);
 
index f9928f2c125f726bbf89474096bd47990bfb86eb..33cd68ae7bf9362539fa1a99e34686e0cca3de2b 100644 (file)
@@ -180,6 +180,9 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
                return false;
 
+       if (mvm->nvm_data->sku_cap_mimo_disabled)
+               return false;
+
        return true;
 }
 
index 01996c9d98a79b1d62e3a665cd0c720df79ad04e..376b84e54ad7e8bbb48d039d354c03748665451c 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -320,7 +320,7 @@ struct iwl_trans_pcie {
 
        /*protect hw register */
        spinlock_t reg_lock;
-       bool cmd_in_flight;
+       bool cmd_hold_nic_awake;
        bool ref_cmd_in_flight;
 
        /* protect ref counter */
index 47bbf573fdc836c9e410bd58c8decbe7e91830c4..dc179094e6a0d440b2aa29909c05adbc07f3f6b5 100644 (file)
@@ -1049,9 +1049,11 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
                iwl_pcie_rx_stop(trans);
 
                /* Power-down device's busmaster DMA clocks */
-               iwl_write_prph(trans, APMG_CLK_DIS_REG,
-                              APMG_CLK_VAL_DMA_CLK_RQT);
-               udelay(5);
+               if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+                       iwl_write_prph(trans, APMG_CLK_DIS_REG,
+                                      APMG_CLK_VAL_DMA_CLK_RQT);
+                       udelay(5);
+               }
        }
 
        /* Make sure (redundant) we've released our request to stay awake */
@@ -1370,7 +1372,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
 
        spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
 
-       if (trans_pcie->cmd_in_flight)
+       if (trans_pcie->cmd_hold_nic_awake)
                goto out;
 
        /* this bit wakes up the NIC */
@@ -1436,7 +1438,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
         */
        __acquire(&trans_pcie->reg_lock);
 
-       if (trans_pcie->cmd_in_flight)
+       if (trans_pcie->cmd_hold_nic_awake)
                goto out;
 
        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
index 06952aadfd7b5d4dccfff9f9689cd804031ab0eb..5ef8044c2ea3ed7317870902168c71be936cd8df 100644 (file)
@@ -1039,18 +1039,14 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
                iwl_trans_pcie_ref(trans);
        }
 
-       if (trans_pcie->cmd_in_flight)
-               return 0;
-
-       trans_pcie->cmd_in_flight = true;
-
        /*
         * wake up the NIC to make sure that the firmware will see the host
         * command - we will let the NIC sleep once all the host commands
         * returned. This needs to be done only on NICs that have
         * apmg_wake_up_wa set.
         */
-       if (trans->cfg->base_params->apmg_wake_up_wa) {
+       if (trans->cfg->base_params->apmg_wake_up_wa &&
+           !trans_pcie->cmd_hold_nic_awake) {
                __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                                         CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
@@ -1064,10 +1060,10 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
                if (ret < 0) {
                        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
                                        CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-                       trans_pcie->cmd_in_flight = false;
                        IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
                        return -EIO;
                }
+               trans_pcie->cmd_hold_nic_awake = true;
        }
 
        return 0;
@@ -1085,15 +1081,14 @@ static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
                iwl_trans_pcie_unref(trans);
        }
 
-       if (WARN_ON(!trans_pcie->cmd_in_flight))
-               return 0;
-
-       trans_pcie->cmd_in_flight = false;
+       if (trans->cfg->base_params->apmg_wake_up_wa) {
+               if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+                       return 0;
 
-       if (trans->cfg->base_params->apmg_wake_up_wa)
+               trans_pcie->cmd_hold_nic_awake = false;
                __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
-                                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
+                                          CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+       }
        return 0;
 }
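
The rename from cmd_in_flight to cmd_hold_nic_awake above also tightens the invariant: the flag now means "we are actively holding the NIC awake", so it is set only after the wake handshake succeeds and cleared only on NICs where the wake workaround applies. A small standalone model of that acquire/release discipline (names illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool cmd_hold_nic_awake;

static int wake_nic(bool handshake_ok)
{
	if (cmd_hold_nic_awake)
		return 0;		/* already held awake */
	if (!handshake_ok)
		return -1;		/* failure: flag must stay false */
	cmd_hold_nic_awake = true;	/* set only on success */
	return 0;
}

static void release_nic(void)
{
	if (!cmd_hold_nic_awake)
		return;			/* nothing to release */
	cmd_hold_nic_awake = false;
}

int main(void)
{
	printf("wake(fail)=%d held=%d\n", wake_nic(false), cmd_hold_nic_awake);
	printf("wake(ok)=%d held=%d\n", wake_nic(true), cmd_hold_nic_awake);
	release_nic();
	printf("after release held=%d\n", cmd_hold_nic_awake);
	return 0;
}
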
 
index 4de46aa61d958fb9c5a1ae9d1ec3c0a0e48acdd4..0d2594395ffbc797671711603461148270f1a03f 100644 (file)
@@ -1250,7 +1250,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        netdev_err(queue->vif->dev,
                                   "txreq.offset: %x, size: %u, end: %lu\n",
                                   txreq.offset, txreq.size,
-                                  (txreq.offset&~PAGE_MASK) + txreq.size);
+                                  (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
                        xenvif_fatal_tx_err(queue->vif);
                        break;
                }
index 3d8dbf5f2d396aa8dde8745afe98d8b4f9febd67..968787abf78d454166561e0c79f9f7421dad931d 100644 (file)
@@ -34,6 +34,8 @@ struct backend_info {
        enum xenbus_state frontend_state;
        struct xenbus_watch hotplug_status_watch;
        u8 have_hotplug_status_watch:1;
+
+       const char *hotplug_script;
 };
 
 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
@@ -238,6 +240,7 @@ static int netback_remove(struct xenbus_device *dev)
                xenvif_free(be->vif);
                be->vif = NULL;
        }
+       kfree(be->hotplug_script);
        kfree(be);
        dev_set_drvdata(&dev->dev, NULL);
        return 0;
@@ -255,6 +258,7 @@ static int netback_probe(struct xenbus_device *dev,
        struct xenbus_transaction xbt;
        int err;
        int sg;
+       const char *script;
        struct backend_info *be = kzalloc(sizeof(struct backend_info),
                                          GFP_KERNEL);
        if (!be) {
@@ -347,6 +351,15 @@ static int netback_probe(struct xenbus_device *dev,
        if (err)
                pr_debug("Error writing multi-queue-max-queues\n");
 
+       script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
+       if (IS_ERR(script)) {
+               err = PTR_ERR(script);
+               xenbus_dev_fatal(dev, err, "reading script");
+               goto fail;
+       }
+
+       be->hotplug_script = script;
+
        err = xenbus_switch_state(dev, XenbusStateInitWait);
        if (err)
                goto fail;
@@ -379,22 +392,14 @@ static int netback_uevent(struct xenbus_device *xdev,
                          struct kobj_uevent_env *env)
 {
        struct backend_info *be = dev_get_drvdata(&xdev->dev);
-       char *val;
 
-       val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
-       if (IS_ERR(val)) {
-               int err = PTR_ERR(val);
-               xenbus_dev_fatal(xdev, err, "reading script");
-               return err;
-       } else {
-               if (add_uevent_var(env, "script=%s", val)) {
-                       kfree(val);
-                       return -ENOMEM;
-               }
-               kfree(val);
-       }
+       if (!be)
+               return 0;
 
-       if (!be || !be->vif)
+       if (add_uevent_var(env, "script=%s", be->hotplug_script))
+               return -ENOMEM;
+
+       if (!be->vif)
                return 0;
 
        return add_uevent_var(env, "vif=%s", be->vif->dev->name);
@@ -793,6 +798,7 @@ static void connect(struct backend_info *be)
                        goto err;
                }
 
+               queue->credit_bytes = credit_bytes;
                queue->remaining_credit = credit_bytes;
                queue->credit_usec = credit_usec;
 
index 3f45afd4382e164053dac1231978e91a5af6dbe0..e031c943286ef3f7765e42640397626d7555607c 100644 (file)
@@ -1698,6 +1698,7 @@ static void xennet_destroy_queues(struct netfront_info *info)
 
                if (netif_running(info->netdev))
                        napi_disable(&queue->napi);
+               del_timer_sync(&queue->rx_refill_timer);
                netif_napi_del(&queue->napi);
        }
 
@@ -2102,9 +2103,6 @@ static const struct attribute_group xennet_dev_group = {
 static int xennet_remove(struct xenbus_device *dev)
 {
        struct netfront_info *info = dev_get_drvdata(&dev->dev);
-       unsigned int num_queues = info->netdev->real_num_tx_queues;
-       struct netfront_queue *queue = NULL;
-       unsigned int i = 0;
 
        dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
@@ -2112,16 +2110,7 @@ static int xennet_remove(struct xenbus_device *dev)
 
        unregister_netdev(info->netdev);
 
-       for (i = 0; i < num_queues; ++i) {
-               queue = &info->queues[i];
-               del_timer_sync(&queue->rx_refill_timer);
-       }
-
-       if (num_queues) {
-               kfree(info->queues);
-               info->queues = NULL;
-       }
-
+       xennet_destroy_queues(info);
        xennet_free_netdev(info->netdev);
 
        return 0;
index cd29b1038c5e3bf6f4a21659343c65584c44b969..15f9b7c9e4d38e93a52864a953e12d4172602797 100644 (file)
@@ -1660,6 +1660,7 @@ static int ntb_atom_detect(struct ntb_device *ndev)
        u32 ppd;
 
        ndev->hw_type = BWD_HW;
+       ndev->limits.max_mw = BWD_MAX_MW;
 
        rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &ppd);
        if (rc)
@@ -1778,7 +1779,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
                                 MW_TO_BAR(i));
                        rc = -EIO;
-                       goto err3;
+                       goto err4;
                }
        }
 
index 99764db0875aa0e1b34ca348ca1606c2a8990258..f0650265febf95cc6a37d03dd3d5b38b0d7370af 100644 (file)
@@ -189,7 +189,7 @@ int __of_attach_node_sysfs(struct device_node *np)
        return 0;
 }
 
-static int __init of_init(void)
+void __init of_core_init(void)
 {
        struct device_node *np;
 
@@ -198,7 +198,8 @@ static int __init of_init(void)
        of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
        if (!of_kset) {
                mutex_unlock(&of_mutex);
-               return -ENOMEM;
+               pr_err("devicetree: failed to register existing nodes\n");
+               return;
        }
        for_each_of_allnodes(np)
                __of_attach_node_sysfs(np);
@@ -207,10 +208,7 @@ static int __init of_init(void)
        /* Symlink in /proc as required by userspace ABI */
        if (of_root)
                proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
-
-       return 0;
 }
-core_initcall(of_init);
 
 static struct property *__of_find_property(const struct device_node *np,
                                           const char *name, int *lenp)
index 3351ef408125d757f52ac772700687ef7f735c06..53826b84e0ec6d46d3699705f46216070a471867 100644 (file)
@@ -225,7 +225,7 @@ void __of_attach_node(struct device_node *np)
        phandle = __of_get_property(np, "phandle", &sz);
        if (!phandle)
                phandle = __of_get_property(np, "linux,phandle", &sz);
-       if (IS_ENABLED(PPC_PSERIES) && !phandle)
+       if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle)
                phandle = __of_get_property(np, "ibm,phandle", &sz);
        np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0;
 
index 4fd0cacf7ca0ae0dfaebf5c612f457cdf6fa43f9..508cc56130e3f88d1b01716a7a00fead250fdf1c 100644 (file)
@@ -428,16 +428,19 @@ static void __assign_resources_sorted(struct list_head *head,
                 * consistent.
                 */
                if (add_align > dev_res->res->start) {
+                       resource_size_t r_size = resource_size(dev_res->res);
+
                        dev_res->res->start = add_align;
-                       dev_res->res->end = add_align +
-                                           resource_size(dev_res->res);
+                       dev_res->res->end = add_align + r_size - 1;
 
                        list_for_each_entry(dev_res2, head, list) {
                                align = pci_resource_alignment(dev_res2->dev,
                                                               dev_res2->res);
-                               if (add_align > align)
+                               if (add_align > align) {
                                        list_move_tail(&dev_res->list,
                                                       &dev_res2->list);
+                                       break;
+                               }
                        }
                }
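
The first hunk above fixes an inclusive-range off-by-one: a resource of size S starting at A ends at A + S - 1, so the old `end = add_align + resource_size(...)` overstated the window by one byte. A one-file check of the invariant that resource_size() relies on:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long start = 0x1000, size = 0x200;
	unsigned long end = start + size - 1;	/* inclusive end */

	assert(end - start + 1 == size);	/* resource_size() invariant */
	printf("resource [0x%lx-0x%lx], size 0x%lx\n", start, end, size);
	return 0;
}
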
 
index a53bd5b52df97ff48fa921a5009f2fa6937aa377..fc9b9f0ea91e8132b08c85478a592e3f820fc2cc 100644 (file)
@@ -38,7 +38,9 @@ config ARMADA375_USBCLUSTER_PHY
 config PHY_DM816X_USB
        tristate "TI dm816x USB PHY driver"
        depends on ARCH_OMAP2PLUS
+       depends on USB_SUPPORT
        select GENERIC_PHY
+       select USB_PHY
        help
          Enable this for dm816x USB to work.
 
@@ -97,8 +99,9 @@ config OMAP_CONTROL_PHY
 config OMAP_USB2
        tristate "OMAP USB2 PHY Driver"
        depends on ARCH_OMAP2PLUS
-       depends on USB_PHY
+       depends on USB_SUPPORT
        select GENERIC_PHY
+       select USB_PHY
        select OMAP_CONTROL_PHY
        depends on OMAP_OCP2SCP
        help
@@ -122,8 +125,9 @@ config TI_PIPE3
 config TWL4030_USB
        tristate "TWL4030 USB Transceiver Driver"
        depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
-       depends on USB_PHY
+       depends on USB_SUPPORT
        select GENERIC_PHY
+       select USB_PHY
        help
          Enable this to support the USB OTG transceiver on TWL4030
          family chips (including the TWL5030 and TPS659x0 devices).
@@ -304,7 +308,7 @@ config PHY_STIH41X_USB
 
 config PHY_QCOM_UFS
        tristate "Qualcomm UFS PHY driver"
-       depends on OF && ARCH_MSM
+       depends on OF && ARCH_QCOM
        select GENERIC_PHY
        help
          Support for UFS PHY on QCOM chipsets.
index 3791838f4bd4b14e145dd5718a3030c4b89d9f3b..63bc12d7a73e561a8e967ac4fb7f453c9a0d23ab 100644 (file)
@@ -530,7 +530,7 @@ struct phy *phy_optional_get(struct device *dev, const char *string)
 {
        struct phy *phy = phy_get(dev, string);
 
-       if (PTR_ERR(phy) == -ENODEV)
+       if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
                phy = NULL;
 
        return phy;
@@ -584,7 +584,7 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string)
 {
        struct phy *phy = devm_phy_get(dev, string);
 
-       if (PTR_ERR(phy) == -ENODEV)
+       if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
                phy = NULL;
 
        return phy;
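
The added IS_ERR() guard matters because PTR_ERR() merely reinterprets the pointer bits as a signed number; on a valid pointer that number is garbage and could in principle compare equal to -ENODEV. A userspace model of the kernel's ERR_PTR encoding (MAX_ERRNO matches the kernel's value, everything else is simplified):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	/* error pointers live in the top 4095 bytes of address space */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *err = ERR_PTR(-ENODEV);
	int x;
	void *ok = &x;

	printf("err: IS_ERR=%d PTR_ERR=%ld\n", IS_ERR(err), PTR_ERR(err));
	printf("ok:  IS_ERR=%d (PTR_ERR on it would be meaningless)\n",
	       IS_ERR(ok));
	return 0;
}
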
index 183ef43681016ba0f238edfa98bbbef3684ab543..c1a468686bdc72433b7596512cb70852f3ef2420 100644 (file)
@@ -275,6 +275,7 @@ static int omap_usb2_probe(struct platform_device *pdev)
                phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
                if (IS_ERR(phy->wkupclk)) {
                        dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
+                       pm_runtime_disable(phy->dev);
                        return PTR_ERR(phy->wkupclk);
                } else {
                        dev_warn(&pdev->dev,
index 778276aba3aa0092d8e8e7bc2de15eae4f5a5a15..97d45f47d1ade847f9f0d7462d0ae91e91505974 100644 (file)
@@ -23,7 +23,7 @@
 #define USBHS_LPSTS                    0x02
 #define USBHS_UGCTRL                   0x80
 #define USBHS_UGCTRL2                  0x84
-#define USBHS_UGSTS                    0x88    /* The manuals have 0x90 */
+#define USBHS_UGSTS                    0x88    /* From technical update */
 
 /* Low Power Status register (LPSTS) */
 #define USBHS_LPSTS_SUSPM              0x4000
@@ -41,7 +41,7 @@
 #define USBHS_UGCTRL2_USB0SEL_HS_USB   0x00000030
 
 /* USB General status register (UGSTS) */
-#define USBHS_UGSTS_LOCK               0x00000300 /* The manuals have 0x3 */
+#define USBHS_UGSTS_LOCK               0x00000100 /* From technical update */
 
 #define PHYS_PER_CHANNEL       2
 
index 4ad5c1a996e3e906023bbe246c98a258ec3bba23..e406e3d8c1c71713e08ceb440e43900fbbb5b8be 100644 (file)
@@ -643,7 +643,9 @@ static const struct cygnus_gpio_pin_range cygnus_gpio_pintable[] = {
        CYGNUS_PINRANGE(87, 104, 12),
        CYGNUS_PINRANGE(99, 102, 2),
        CYGNUS_PINRANGE(101, 90, 4),
-       CYGNUS_PINRANGE(105, 116, 10),
+       CYGNUS_PINRANGE(105, 116, 6),
+       CYGNUS_PINRANGE(111, 100, 2),
+       CYGNUS_PINRANGE(113, 122, 4),
        CYGNUS_PINRANGE(123, 11, 1),
        CYGNUS_PINRANGE(124, 38, 4),
        CYGNUS_PINRANGE(128, 43, 1),
index 82f691eeeec4d82cd5e75b7a96be719befbcd57f..732ff757a95fe12fe7b5ddca1714a8e527017e90 100644 (file)
@@ -1292,6 +1292,49 @@ static void chv_gpio_irq_unmask(struct irq_data *d)
        chv_gpio_irq_mask_unmask(d, false);
 }
 
+static unsigned chv_gpio_irq_startup(struct irq_data *d)
+{
+       /*
+        * Check if the interrupt has been requested with 0 as triggering
+        * type. In that case it is assumed that the current values
+        * programmed to the hardware are used (e.g. BIOS-configured
+        * defaults).
+        *
+        * In that case ->irq_set_type() will never be called so we need to
+        * read back the values from hardware now, set the correct flow
+        * handler and update mappings before the interrupt is used.
+        */
+       if (irqd_get_trigger_type(d) == IRQ_TYPE_NONE) {
+               struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+               struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
+               unsigned offset = irqd_to_hwirq(d);
+               int pin = chv_gpio_offset_to_pin(pctrl, offset);
+               irq_flow_handler_t handler;
+               unsigned long flags;
+               u32 intsel, value;
+
+               intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+               intsel &= CHV_PADCTRL0_INTSEL_MASK;
+               intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
+
+               value = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
+               if (value & CHV_PADCTRL1_INTWAKECFG_LEVEL)
+                       handler = handle_level_irq;
+               else
+                       handler = handle_edge_irq;
+
+               spin_lock_irqsave(&pctrl->lock, flags);
+               if (!pctrl->intr_lines[intsel]) {
+                       __irq_set_handler_locked(d->irq, handler);
+                       pctrl->intr_lines[intsel] = offset;
+               }
+               spin_unlock_irqrestore(&pctrl->lock, flags);
+       }
+
+       chv_gpio_irq_unmask(d);
+       return 0;
+}
+
 static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1357,6 +1400,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
 
 static struct irq_chip chv_gpio_irqchip = {
        .name = "chv-gpio",
+       .irq_startup = chv_gpio_irq_startup,
        .irq_ack = chv_gpio_irq_ack,
        .irq_mask = chv_gpio_irq_mask,
        .irq_unmask = chv_gpio_irq_unmask,
index edcd140e089968e0f7b95fef4ffcd82f157f8294..a70a5fe79d44d343b0e1830ccd36fe7b6384d314 100644 (file)
@@ -569,7 +569,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
                domain->chip.direction_output = meson_gpio_direction_output;
                domain->chip.get = meson_gpio_get;
                domain->chip.set = meson_gpio_set;
-               domain->chip.base = -1;
+               domain->chip.base = domain->data->pin_base;
                domain->chip.ngpio = domain->data->num_pins;
                domain->chip.can_sleep = false;
                domain->chip.of_node = domain->of_node;
index 2f7ea62298801c2a5b9b87605a3fe5fbb0ca00bd..9677807db364d70ee4512799e26449bccba56a08 100644 (file)
@@ -876,13 +876,13 @@ static struct meson_domain_data meson8b_domain_data[] = {
                .banks          = meson8b_banks,
                .num_banks      = ARRAY_SIZE(meson8b_banks),
                .pin_base       = 0,
-               .num_pins       = 83,
+               .num_pins       = 130,
        },
        {
                .name           = "ao-bank",
                .banks          = meson8b_ao_banks,
                .num_banks      = ARRAY_SIZE(meson8b_ao_banks),
-               .pin_base       = 83,
+               .pin_base       = 130,
                .num_pins       = 16,
        },
 };
index 9bb9ad6d4a1b1b1c3600cd283f240d2479a80841..28f328136f0df78fe3253d6996ae9108229f87e5 100644 (file)
@@ -2897,7 +2897,7 @@ static ssize_t hotkey_wakeup_reason_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason);
 }
 
-static DEVICE_ATTR_RO(hotkey_wakeup_reason);
+static DEVICE_ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
 
 static void hotkey_wakeup_reason_notify_change(void)
 {
@@ -2913,7 +2913,8 @@ static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack);
 }
 
-static DEVICE_ATTR_RO(hotkey_wakeup_hotunplug_complete);
+static DEVICE_ATTR(wakeup_hotunplug_complete, S_IRUGO,
+                  hotkey_wakeup_hotunplug_complete_show, NULL);
 
 static void hotkey_wakeup_hotunplug_complete_notify_change(void)
 {
@@ -2978,8 +2979,8 @@ static struct attribute *hotkey_attributes[] __initdata = {
        &dev_attr_hotkey_enable.attr,
        &dev_attr_hotkey_bios_enabled.attr,
        &dev_attr_hotkey_bios_mask.attr,
-       &dev_attr_hotkey_wakeup_reason.attr,
-       &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
+       &dev_attr_wakeup_reason.attr,
+       &dev_attr_wakeup_hotunplug_complete.attr,
        &dev_attr_hotkey_mask.attr,
        &dev_attr_hotkey_all_mask.attr,
        &dev_attr_hotkey_recommended_mask.attr,
@@ -4393,12 +4394,13 @@ static ssize_t wan_enable_store(struct device *dev,
                        attr, buf, count);
 }
 
-static DEVICE_ATTR_RW(wan_enable);
+static DEVICE_ATTR(wwan_enable, S_IWUSR | S_IRUGO,
+                  wan_enable_show, wan_enable_store);
 
 /* --------------------------------------------------------------------- */
 
 static struct attribute *wan_attributes[] = {
-       &dev_attr_wan_enable.attr,
+       &dev_attr_wwan_enable.attr,
        NULL
 };
 
@@ -8138,7 +8140,8 @@ static ssize_t fan_pwm1_enable_store(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR_RW(fan_pwm1_enable);
+static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
+                  fan_pwm1_enable_show, fan_pwm1_enable_store);
 
 /* sysfs fan pwm1 ------------------------------------------------------ */
 static ssize_t fan_pwm1_show(struct device *dev,
@@ -8198,7 +8201,7 @@ static ssize_t fan_pwm1_store(struct device *dev,
        return (rc) ? rc : count;
 }
 
-static DEVICE_ATTR_RW(fan_pwm1);
+static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, fan_pwm1_show, fan_pwm1_store);
 
 /* sysfs fan fan1_input ------------------------------------------------ */
 static ssize_t fan_fan1_input_show(struct device *dev,
@@ -8215,7 +8218,7 @@ static ssize_t fan_fan1_input_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%u\n", speed);
 }
 
-static DEVICE_ATTR_RO(fan_fan1_input);
+static DEVICE_ATTR(fan1_input, S_IRUGO, fan_fan1_input_show, NULL);
 
 /* sysfs fan fan2_input ------------------------------------------------ */
 static ssize_t fan_fan2_input_show(struct device *dev,
@@ -8232,7 +8235,7 @@ static ssize_t fan_fan2_input_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%u\n", speed);
 }
 
-static DEVICE_ATTR_RO(fan_fan2_input);
+static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL);
 
 /* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
 static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
@@ -8265,8 +8268,8 @@ static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO,
 
 /* --------------------------------------------------------------------- */
 static struct attribute *fan_attributes[] = {
-       &dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr,
-       &dev_attr_fan_fan1_input.attr,
+       &dev_attr_pwm1_enable.attr, &dev_attr_pwm1.attr,
+       &dev_attr_fan1_input.attr,
        NULL, /* for fan2_input */
        NULL
 };
@@ -8400,7 +8403,7 @@ static int __init fan_init(struct ibm_init_struct *iibm)
                if (tp_features.second_fan) {
                        /* attach second fan tachometer */
                        fan_attributes[ARRAY_SIZE(fan_attributes)-2] =
-                                       &dev_attr_fan_fan2_input.attr;
+                                       &dev_attr_fan2_input.attr;
                }
                rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
                                         &fan_attr_group);
@@ -8848,7 +8851,7 @@ static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME);
 }
 
-static DEVICE_ATTR_RO(thinkpad_acpi_pdev_name);
+static DEVICE_ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL);
 
 /* --------------------------------------------------------------------- */
 
@@ -9390,8 +9393,7 @@ static void thinkpad_acpi_module_exit(void)
                hwmon_device_unregister(tpacpi_hwmon);
 
        if (tp_features.sensors_pdev_attrs_registered)
-               device_remove_file(&tpacpi_sensors_pdev->dev,
-                                  &dev_attr_thinkpad_acpi_pdev_name);
+               device_remove_file(&tpacpi_sensors_pdev->dev, &dev_attr_name);
        if (tpacpi_sensors_pdev)
                platform_device_unregister(tpacpi_sensors_pdev);
        if (tpacpi_pdev)
@@ -9512,8 +9514,7 @@ static int __init thinkpad_acpi_module_init(void)
                thinkpad_acpi_module_exit();
                return ret;
        }
-       ret = device_create_file(&tpacpi_sensors_pdev->dev,
-                                &dev_attr_thinkpad_acpi_pdev_name);
+       ret = device_create_file(&tpacpi_sensors_pdev->dev, &dev_attr_name);
        if (ret) {
                pr_err("unable to create sysfs hwmon device attributes\n");
                thinkpad_acpi_module_exit();
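
The DEVICE_ATTR_RO/DEVICE_ATTR_RW to DEVICE_ATTR rewrites above exist because the short macros derive the sysfs file name from the variable token, so the hwmon files came out as e.g. "fan_pwm1" instead of the ABI-mandated "pwm1"; the long form decouples the file name from the handler prefix. A compilable sketch of that name-derivation mechanism (simplified structs, not the real kernel macros):

#include <stdio.h>

struct attribute { const char *name; int mode; };
struct device_attribute { struct attribute attr; };

/* the sysfs file name comes from the macro argument via # */
#define DEVICE_ATTR_SKETCH(_name, _mode) \
	struct device_attribute dev_attr_##_name = \
		{ .attr = { .name = #_name, .mode = _mode } }

static DEVICE_ATTR_SKETCH(pwm1, 0644);

int main(void)
{
	/* with a _RO-style macro, getting the file "pwm1" would force
	 * the show/store handlers to be named pwm1_* as well */
	printf("sysfs name: %s\n", dev_attr_pwm1.attr.name);
	return 0;
}
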
index 8a4df7a1f2eecc879a679711d13d64885397af39..e628d4c2f2ae43de1955aac857f745ec2d3d0357 100644 (file)
@@ -394,6 +394,7 @@ static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
 
 static int da9052_regulator_probe(struct platform_device *pdev)
 {
+       const struct mfd_cell *cell = mfd_get_cell(pdev);
        struct regulator_config config = { };
        struct da9052_regulator *regulator;
        struct da9052 *da9052;
@@ -409,7 +410,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
        regulator->da9052 = da9052;
 
        regulator->info = find_regulator_info(regulator->da9052->chip_id,
-                                             pdev->id);
+                                             cell->id);
        if (regulator->info == NULL) {
                dev_err(&pdev->dev, "invalid regulator ID specified\n");
                return -EINVAL;
@@ -419,7 +420,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
        config.driver_data = regulator;
        config.regmap = da9052->regmap;
        if (pdata && pdata->regulators) {
-               config.init_data = pdata->regulators[pdev->id];
+               config.init_data = pdata->regulators[cell->id];
        } else {
 #ifdef CONFIG_OF
                struct device_node *nproot = da9052->dev->of_node;
index 68c2002e78bf80d3b383b92f76519901459059da..5c9e680aa375a57c07a8977790271615ffb1499d 100644 (file)
@@ -1020,8 +1020,7 @@ static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
        struct se_portal_group *se_tpg = &base_tpg->se_tpg;
        struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
 
-       if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
-                                 &se_tpg->tpg_group.cg_item)) {
+       if (!target_depend_item(&se_tpg->tpg_group.cg_item)) {
                atomic_set(&base_tpg->lport_tpg_enabled, 1);
                qlt_enable_vha(base_vha);
        }
@@ -1037,8 +1036,7 @@ static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
 
        if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
                atomic_set(&base_tpg->lport_tpg_enabled, 0);
-               configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
-                                      &se_tpg->tpg_group.cg_item);
+               target_undepend_item(&se_tpg->tpg_group.cg_item);
        }
        complete(&base_tpg->tpg_base_comp);
 }
index bcdb22d5e215c9a393ccabe58f4f94ef132e3516..3c1850332a90212798ab5030554bc8fad39d9796 100644 (file)
@@ -4,6 +4,7 @@
 config MTK_PMIC_WRAP
        tristate "MediaTek PMIC Wrapper Support"
        depends on ARCH_MEDIATEK
+       depends on RESET_CONTROLLER
        select REGMAP
        help
          Say yes here to add support for MediaTek PMIC Wrapper found
index db5be1eec54c8db3977ea810e13c5470f416aaa7..f432291feee91e4b7c7b5ce3cc84f3b130933309 100644 (file)
@@ -443,11 +443,6 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
 static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
 {
        int ret;
-       u32 val;
-
-       val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
-       if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
-               pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
 
        ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
        if (ret)
@@ -462,11 +457,6 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
 static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
 {
        int ret;
-       u32 val;
-
-       val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
-       if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
-               pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
 
        ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
        if (ret)
@@ -480,6 +470,8 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
 
        *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
 
+       pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+
        return 0;
 }
 
@@ -563,45 +555,17 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp)
 
 static int pwrap_init_reg_clock(struct pmic_wrapper *wrp)
 {
-       unsigned long rate_spi;
-       int ck_mhz;
-
-       rate_spi = clk_get_rate(wrp->clk_spi);
-
-       if (rate_spi > 26000000)
-               ck_mhz = 26;
-       else if (rate_spi > 18000000)
-               ck_mhz = 18;
-       else
-               ck_mhz = 0;
-
-       switch (ck_mhz) {
-       case 18:
-               if (pwrap_is_mt8135(wrp))
-                       pwrap_writel(wrp, 0xc, PWRAP_CSHEXT);
-               pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE);
-               pwrap_writel(wrp, 0xc, PWRAP_CSHEXT_READ);
-               pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
-               pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
-               break;
-       case 26:
-               if (pwrap_is_mt8135(wrp))
-                       pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
+       if (pwrap_is_mt8135(wrp)) {
+               pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
                pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
                pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
                pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
                pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
-               break;
-       case 0:
-               if (pwrap_is_mt8135(wrp))
-                       pwrap_writel(wrp, 0xf, PWRAP_CSHEXT);
-               pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_WRITE);
-               pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_READ);
-               pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_START);
-               pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_END);
-               break;
-       default:
-               return -EINVAL;
+       } else {
+               pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
+               pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
+               pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
+               pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
        }
 
        return 0;
index 09428412139e399979537da2e6272eda827a8757..c5352ea4821ea0df593c7043ac911ee891f103b0 100644 (file)
@@ -621,8 +621,8 @@ static u32 ssb_pmu_get_alp_clock_clk0(struct ssb_chipcommon *cc)
        u32 crystalfreq;
        const struct pmu0_plltab_entry *e = NULL;
 
-       crystalfreq = chipco_read32(cc, SSB_CHIPCO_PMU_CTL) &
-                     SSB_CHIPCO_PMU_CTL_XTALFREQ >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT;
+       crystalfreq = (chipco_read32(cc, SSB_CHIPCO_PMU_CTL) &
+                      SSB_CHIPCO_PMU_CTL_XTALFREQ)  >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT;
        e = pmu0_plltab_find_entry(crystalfreq);
        BUG_ON(!e);
        return e->freq * 1000;
@@ -634,7 +634,7 @@ u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc)
 
        switch (bus->chip_id) {
        case 0x5354:
-               ssb_pmu_get_alp_clock_clk0(cc);
+               return ssb_pmu_get_alp_clock_clk0(cc);
        default:
                ssb_err("ERROR: PMU alp clock unknown for device %04X\n",
                        bus->chip_id);
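
The crystalfreq fix above is pure operator precedence: in C, `a & m >> s` parses as `a & (m >> s)`, so the mask was shifted before being applied. A standalone demonstration (mask and values are illustrative, not the real SSB register layout):

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x00340000;
	unsigned int mask = 0x007f0000, shift = 16;

	/* >> binds tighter than &, so this masks with 0x7f */
	printf("wrong: 0x%x\n", reg & mask >> shift);
	/* parenthesized form extracts the intended field */
	printf("right: 0x%x\n", (reg & mask) >> shift);
	return 0;
}
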
index 15a7ee3859dd7dd74aee31593876b422093eff04..5fe1c22e289b881cacebbf081d7c245fad7d2098 100644 (file)
@@ -359,12 +359,13 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
 
        /*
         * Accessing PCI config without a proper delay after devices reset (not
-        * GPIO reset) was causing reboots on WRT300N v1.0.
+        * GPIO reset) was causing reboots on WRT300N v1.0 (BCM4704).
         * Tested delay 850 us lowered reboot chance to 50-80%, 1000 us fixed it
         * completely. Flushing all writes was also tested but with no luck.
+        * The same problem was reported for WRT350N v1 (BCM4705), so we just
+        * sleep here unconditionally.
         */
-       if (pc->dev->bus->chip_id == 0x4704)
-               usleep_range(1000, 2000);
+       usleep_range(1000, 2000);
 
        /* Enable PCI bridge BAR0 prefetch and burst */
        val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
index 5ff4716b72c311485084005b9e09a36021157530..784b5ecfa8493ba07d8ba90cde1b11b2b6a4b6b7 100644 (file)
@@ -746,8 +746,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
 /*
  * Context: softirq
  */
-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
-                       int length, int offset, int total_size)
+void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc,
+                       u8 length, u16 offset, u16 total_size)
 {
        struct oz_port *port = hport;
        struct urb *urb;
@@ -759,8 +759,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
        if (!urb)
                return;
        if (status == 0) {
-               int copy_len;
-               int required_size = urb->transfer_buffer_length;
+               unsigned int copy_len;
+               unsigned int required_size = urb->transfer_buffer_length;
 
                if (required_size > total_size)
                        required_size = total_size;
index 4249fa37401289c4caf1f4cae4d46dba321f276b..d2a6085345bec8c2e927115389efc46bfbad3019 100644 (file)
@@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd);
 
 /* Confirmation functions.
  */
-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status,
-       const u8 *desc, int length, int offset, int total_size);
+void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status,
+       const u8 *desc, u8 length, u16 offset, u16 total_size);
 void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
        const u8 *data, int data_len);
 
index d434d8c6fff67c04b58d6cac5c76a6832bae5bc3..f660bb198c65534a6cbe8183d3f5d0a30a532eb1 100644 (file)
@@ -326,7 +326,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
                        struct oz_multiple_fixed *body =
                                (struct oz_multiple_fixed *)data_hdr;
                        u8 *data = body->data;
-                       int n = (len - sizeof(struct oz_multiple_fixed)+1)
+                       unsigned int n;
+                       if (!body->unit_size ||
+                               len < sizeof(struct oz_multiple_fixed) - 1)
+                               break;
+                       n = (len - (sizeof(struct oz_multiple_fixed) - 1))
                                / body->unit_size;
                        while (n--) {
                                oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
@@ -390,10 +394,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
        case OZ_GET_DESC_RSP: {
                        struct oz_get_desc_rsp *body =
                                (struct oz_get_desc_rsp *)usb_hdr;
-                       int data_len = elt->length -
-                                       sizeof(struct oz_get_desc_rsp) + 1;
-                       u16 offs = le16_to_cpu(get_unaligned(&body->offset));
-                       u16 total_size =
+                       u16 offs, total_size;
+                       u8 data_len;
+
+                       if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
+                               break;
+                       data_len = elt->length -
+                                       (sizeof(struct oz_get_desc_rsp) - 1);
+                       offs = le16_to_cpu(get_unaligned(&body->offset));
+                       total_size =
                                le16_to_cpu(get_unaligned(&body->total_size));
                        oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
                        oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
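
Both ozwpan hunks above guard the same class of bug: subtracting a header size from an attacker-controlled length underflows when the length is too short, and the huge unsigned result then drives a loop count or copy size. A minimal demonstration of the check they add (the struct follows the driver's sizeof-minus-one convention for a trailing one-byte array):

#include <stdio.h>
#include <stddef.h>

struct hdr { unsigned char type, unit_size, data[1]; };

int main(void)
{
	size_t len = 1;	/* from the wire, shorter than the header */
	size_t need = sizeof(struct hdr) - 1;	/* header minus data[1] */

	if (len < need) {
		/* validate before subtracting: unsigned math never
		 * goes negative, it wraps to a huge value */
		printf("reject: len %zu < header %zu\n", len, need);
		return 0;
	}
	printf("payload bytes: %zu\n", len - need);
	return 0;
}
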
index f1d47a0676c3e3ba29ea974754c77e8a32a3f950..ada8d5dafd492e97a1b4d9457d25e4a485e67556 100644 (file)
@@ -898,11 +898,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
                          IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedNoLinkBlinkInProgress = true;
@@ -921,11 +921,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
                            IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedLinkBlinkInProgress = true;
@@ -946,15 +946,15 @@ static void SwLedControlMode1(struct _adapter *padapter,
                        if (IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                 pLed->bLedLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedScanBlinkInProgress = true;
@@ -975,11 +975,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
                            IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedLinkBlinkInProgress = false;
                        }
                        pLed->bLedBlinkInProgress = true;
@@ -998,19 +998,19 @@ static void SwLedControlMode1(struct _adapter *padapter,
        case LED_CTL_START_WPS_BOTTON:
                if (pLed->bLedWPSBlinkInProgress == false) {
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                 pLed->bLedLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        if (pLed->bLedScanBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedScanBlinkInProgress = false;
                        }
                        pLed->bLedWPSBlinkInProgress = true;
@@ -1025,23 +1025,23 @@ static void SwLedControlMode1(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS:
                if (pLed->bLedNoLinkBlinkInProgress == true) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedNoLinkBlinkInProgress = false;
                }
                if (pLed->bLedLinkBlinkInProgress == true) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                         pLed->bLedLinkBlinkInProgress = false;
                }
                if (pLed->bLedBlinkInProgress == true) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress == true) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
                if (pLed->bLedWPSBlinkInProgress)
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                else
                        pLed->bLedWPSBlinkInProgress = true;
                pLed->CurrLedState = LED_BLINK_WPS_STOP;
@@ -1057,7 +1057,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS_FAIL:
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                pLed->bLedNoLinkBlinkInProgress = true;
@@ -1073,23 +1073,23 @@ static void SwLedControlMode1(struct _adapter *padapter,
                pLed->CurrLedState = LED_OFF;
                pLed->BlinkingLedState = LED_OFF;
                if (pLed->bLedNoLinkBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedNoLinkBlinkInProgress = false;
                }
                if (pLed->bLedLinkBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedLinkBlinkInProgress = false;
                }
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
                mod_timer(&pLed->BlinkTimer,
@@ -1116,7 +1116,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
                                return;
 
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedScanBlinkInProgress = true;
@@ -1154,11 +1154,11 @@ static void SwLedControlMode2(struct _adapter *padapter,
                pLed->CurrLedState = LED_ON;
                pLed->BlinkingLedState = LED_ON;
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
 
@@ -1170,11 +1170,11 @@ static void SwLedControlMode2(struct _adapter *padapter,
        case LED_CTL_START_WPS_BOTTON:
                if (pLed->bLedWPSBlinkInProgress == false) {
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        if (pLed->bLedScanBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedScanBlinkInProgress = false;
                        }
                        pLed->bLedWPSBlinkInProgress = true;
@@ -1214,15 +1214,15 @@ static void SwLedControlMode2(struct _adapter *padapter,
                pLed->CurrLedState = LED_OFF;
                pLed->BlinkingLedState = LED_OFF;
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                mod_timer(&pLed->BlinkTimer,
@@ -1248,7 +1248,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
                        if (IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedScanBlinkInProgress = true;
@@ -1286,11 +1286,11 @@ static void SwLedControlMode3(struct _adapter *padapter,
                pLed->CurrLedState = LED_ON;
                pLed->BlinkingLedState = LED_ON;
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
                mod_timer(&pLed->BlinkTimer,
@@ -1300,11 +1300,11 @@ static void SwLedControlMode3(struct _adapter *padapter,
        case LED_CTL_START_WPS_BOTTON:
                if (pLed->bLedWPSBlinkInProgress == false) {
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        if (pLed->bLedScanBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedScanBlinkInProgress = false;
                        }
                        pLed->bLedWPSBlinkInProgress = true;
@@ -1319,7 +1319,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS:
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&(pLed->BlinkTimer));
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                } else
                        pLed->bLedWPSBlinkInProgress = true;
@@ -1336,7 +1336,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS_FAIL:
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                pLed->CurrLedState = LED_OFF;
@@ -1357,15 +1357,15 @@ static void SwLedControlMode3(struct _adapter *padapter,
                pLed->CurrLedState = LED_OFF;
                pLed->BlinkingLedState = LED_OFF;
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                mod_timer(&pLed->BlinkTimer,
@@ -1388,7 +1388,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
        case LED_CTL_START_TO_LINK:
                if (pLed1->bLedWPSBlinkInProgress) {
                        pLed1->bLedWPSBlinkInProgress = false;
-                       del_timer_sync(&pLed1->BlinkTimer);
+                       del_timer(&pLed1->BlinkTimer);
                        pLed1->BlinkingLedState = LED_OFF;
                        pLed1->CurrLedState = LED_OFF;
                        if (pLed1->bLedOn)
@@ -1400,11 +1400,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
                            IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        pLed->bLedStartToLinkBlinkInProgress = true;
@@ -1426,7 +1426,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                if (LedAction == LED_CTL_LINK) {
                        if (pLed1->bLedWPSBlinkInProgress) {
                                pLed1->bLedWPSBlinkInProgress = false;
-                               del_timer_sync(&pLed1->BlinkTimer);
+                               del_timer(&pLed1->BlinkTimer);
                                pLed1->BlinkingLedState = LED_OFF;
                                pLed1->CurrLedState = LED_OFF;
                                if (pLed1->bLedOn)
@@ -1439,7 +1439,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                            IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedNoLinkBlinkInProgress = true;
@@ -1460,11 +1460,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
                        if (IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedScanBlinkInProgress = true;
@@ -1485,7 +1485,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                            IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        pLed->bLedBlinkInProgress = true;
@@ -1503,7 +1503,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
        case LED_CTL_START_WPS_BOTTON:
                if (pLed1->bLedWPSBlinkInProgress) {
                        pLed1->bLedWPSBlinkInProgress = false;
-                       del_timer_sync(&(pLed1->BlinkTimer));
+                       del_timer(&pLed1->BlinkTimer);
                        pLed1->BlinkingLedState = LED_OFF;
                        pLed1->CurrLedState = LED_OFF;
                        if (pLed1->bLedOn)
@@ -1512,15 +1512,15 @@ static void SwLedControlMode4(struct _adapter *padapter,
                }
                if (pLed->bLedWPSBlinkInProgress == false) {
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        if (pLed->bLedScanBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedScanBlinkInProgress = false;
                        }
                        pLed->bLedWPSBlinkInProgress = true;
@@ -1538,7 +1538,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS:  /*WPS connect success*/
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                pLed->bLedNoLinkBlinkInProgress = true;
@@ -1552,7 +1552,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS_FAIL:     /*WPS authentication fail*/
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                pLed->bLedNoLinkBlinkInProgress = true;
@@ -1565,7 +1565,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                          msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
                /*LED1 settings*/
                if (pLed1->bLedWPSBlinkInProgress)
-                       del_timer_sync(&pLed1->BlinkTimer);
+                       del_timer(&pLed1->BlinkTimer);
                else
                        pLed1->bLedWPSBlinkInProgress = true;
                pLed1->CurrLedState = LED_BLINK_WPS_STOP;
@@ -1578,7 +1578,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS_FAIL_OVERLAP:     /*WPS session overlap*/
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                pLed->bLedNoLinkBlinkInProgress = true;
@@ -1591,7 +1591,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                          msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
                /*LED1 settings*/
                if (pLed1->bLedWPSBlinkInProgress)
-                       del_timer_sync(&pLed1->BlinkTimer);
+                       del_timer(&pLed1->BlinkTimer);
                else
                        pLed1->bLedWPSBlinkInProgress = true;
                pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
@@ -1607,31 +1607,31 @@ static void SwLedControlMode4(struct _adapter *padapter,
                pLed->CurrLedState = LED_OFF;
                pLed->BlinkingLedState = LED_OFF;
                if (pLed->bLedNoLinkBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedNoLinkBlinkInProgress = false;
                }
                if (pLed->bLedLinkBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedLinkBlinkInProgress = false;
                }
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
                if (pLed->bLedStartToLinkBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedStartToLinkBlinkInProgress = false;
                }
                if (pLed1->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed1->BlinkTimer);
+                       del_timer(&pLed1->BlinkTimer);
                        pLed1->bLedWPSBlinkInProgress = false;
                }
                pLed1->BlinkingLedState = LED_UNKNOWN;
@@ -1671,7 +1671,7 @@ static void SwLedControlMode5(struct _adapter *padapter,
                        ; /* dummy branch */
                else if (pLed->bLedScanBlinkInProgress == false) {
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedScanBlinkInProgress = true;
@@ -1705,7 +1705,7 @@ static void SwLedControlMode5(struct _adapter *padapter,
                pLed->CurrLedState = LED_OFF;
                pLed->BlinkingLedState = LED_OFF;
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                SwLedOff(padapter, pLed);
@@ -1756,7 +1756,7 @@ static void SwLedControlMode6(struct _adapter *padapter,
        case LED_CTL_START_WPS_BOTTON:
                if (pLed->bLedWPSBlinkInProgress == false) {
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedWPSBlinkInProgress = true;
@@ -1772,7 +1772,7 @@ static void SwLedControlMode6(struct _adapter *padapter,
        case LED_CTL_STOP_WPS_FAIL:
        case LED_CTL_STOP_WPS:
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                pLed->CurrLedState = LED_ON;
@@ -1784,11 +1784,11 @@ static void SwLedControlMode6(struct _adapter *padapter,
                pLed->CurrLedState = LED_OFF;
                pLed->BlinkingLedState = LED_OFF;
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                SwLedOff(padapter, pLed);
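
Every hunk in this file, and the rtl8712 callback files that follow, swaps the blocking del_timer_sync() for del_timer(). del_timer_sync() waits until a concurrently running timer handler has finished, so calling it from the timer's own callback path, or while holding a lock that handler also takes, deadlocks; del_timer() merely deactivates a pending timer and never sleeps. A sketch of the self-deadlock case, with hypothetical names and the timer API of this kernel generation:

#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

static DEFINE_SPINLOCK(led_lock);
static struct timer_list blink_timer;

static void blink_handler(unsigned long data)
{
        spin_lock(&led_lock);
        /* ... update LED state, possibly ending the blink sequence ... */
        del_timer(&blink_timer);        /* safe: only deactivates */
        /*
         * del_timer_sync(&blink_timer) here would spin forever,
         * because it waits for this very handler to return.
         */
        spin_unlock(&led_lock);
}

static void led_blink_start(void)
{
        setup_timer(&blink_timer, blink_handler, 0UL);
        mod_timer(&blink_timer, jiffies + msecs_to_jiffies(100));
}
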
index 1a1c38f885d6b191d5a62b5fb1aae26713dd6cb3..e35854d28f90ed96aa3ff149f39175c9e46b1373 100644 (file)
@@ -910,7 +910,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
        if (pcmd->res != H2C_SUCCESS)
                mod_timer(&pmlmepriv->assoc_timer,
                          jiffies + msecs_to_jiffies(1));
-       del_timer_sync(&pmlmepriv->assoc_timer);
+       del_timer(&pmlmepriv->assoc_timer);
 #ifdef __BIG_ENDIAN
        /* endian_convert */
        pnetwork->Length = le32_to_cpu(pnetwork->Length);
index fb2b195b90af0d1690552dfccb6ec93b13960fdf..c044b0e55ba93d0c989031d52ce99f4008ae0630 100644 (file)
@@ -582,7 +582,7 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf)
        spin_lock_irqsave(&pmlmepriv->lock, irqL);
 
        if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
-               del_timer_sync(&pmlmepriv->scan_to_timer);
+               del_timer(&pmlmepriv->scan_to_timer);
 
                _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
        }
@@ -696,7 +696,7 @@ void r8712_ind_disconnect(struct _adapter *padapter)
        }
        if (padapter->pwrctrlpriv.pwr_mode !=
            padapter->registrypriv.power_mgnt) {
-               del_timer_sync(&pmlmepriv->dhcp_timer);
+               del_timer(&pmlmepriv->dhcp_timer);
                r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt,
                                  padapter->registrypriv.smart_ps);
        }
@@ -910,7 +910,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
                        if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
                                == true)
                                r8712_indicate_connect(adapter);
-                       del_timer_sync(&pmlmepriv->assoc_timer);
+                       del_timer(&pmlmepriv->assoc_timer);
                } else
                        goto ignore_joinbss_callback;
        } else {
index aaa584435c87d25d3efb3bbbe794da6cf2096c24..9bc04f474d18d7c79311c8bd6fc80b48015a6550 100644 (file)
@@ -103,7 +103,7 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
 
        if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80))
                return;
-       del_timer_sync(&padapter->pwrctrlpriv.rpwm_check_timer);
+       del_timer(&padapter->pwrctrlpriv.rpwm_check_timer);
        _enter_pwrlock(&pwrpriv->lock);
        pwrpriv->cpwm = (preportpwrstate->state) & 0xf;
        if (pwrpriv->cpwm >= PS_STATE_S2) {
index 7bb96c47f1883dad0c62e8618b2e98ac773fca27..a9b93d0f6f566b83bb00271de37f68dc1716586c 100644 (file)
@@ -198,7 +198,7 @@ void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta)
         * cancel reordering_ctrl_timer */
        for (i = 0; i < 16; i++) {
                preorder_ctrl = &psta->recvreorder_ctrl[i];
-               del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
+               del_timer(&preorder_ctrl->reordering_ctrl_timer);
        }
        spin_lock(&(pfree_sta_queue->lock));
        /* insert into free_sta_queue; 20061114 */
index 34871a628b11124e093231694b0b0fb14b62de80..74e6114ff18f9343e3012cf21c7faadbdf5c6f61 100644 (file)
@@ -230,7 +230,7 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
         * Here we serialize access across the TIQN+TPG Tuple.
         */
        ret = down_interruptible(&tpg->np_login_sem);
-       if ((ret != 0) || signal_pending(current))
+       if (ret != 0)
                return -1;
 
        spin_lock_bh(&tpg->tpg_state_lock);
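
This hunk, and the matching changes to iscsit_get_tpg and sbc_compare_and_write further down, drops a redundant signal_pending() test: down_interruptible() and mutex_lock_interruptible() already return -EINTR when a signal interrupts the wait, so testing for a pending signal after a successful acquisition returned failure while still holding the lock, which was then never released. A sketch of the failure mode, with a hypothetical caller:

#include <linux/semaphore.h>
#include <linux/sched.h>

static int take_login_sem(struct semaphore *sem)
{
        int ret = down_interruptible(sem);

        if (ret != 0)
                return -1;      /* interrupted: semaphore NOT held */
        /*
         * The removed variant also returned -1 here when
         * signal_pending(current) was true -- after the semaphore had
         * already been acquired -- and no path ever released it.
         */
        return 0;               /* caller must up(sem) when finished */
}
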
index 8ce94ff744e6ba1dfd131e5e59a3b18a75639bcb..70d799dfab03c2e3b616b06a635c2a63e8941fda 100644 (file)
@@ -346,6 +346,7 @@ static int iscsi_login_zero_tsih_s1(
        if (IS_ERR(sess->se_sess)) {
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
+               kfree(sess->sess_ops);
                kfree(sess);
                return -ENOMEM;
        }
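
The added kfree(sess->sess_ops) restores the usual error-unwind ordering: release every sub-allocation before freeing the containing object on a failure path. A condensed sketch of the shape, with hypothetical types and a placeholder failure point standing in for the iSCSI session setup:

#include <linux/slab.h>
#include <linux/errno.h>

struct sess_ops { int placeholder; };   /* hypothetical sub-allocation */

struct session {
        struct sess_ops *ops;
};

static int setup_backend(struct session *sess)
{
        return -ENOMEM;         /* placeholder failure point */
}

static struct session *session_alloc(void)
{
        struct session *sess = kzalloc(sizeof(*sess), GFP_KERNEL);

        if (!sess)
                return NULL;
        sess->ops = kzalloc(sizeof(*sess->ops), GFP_KERNEL);
        if (!sess->ops)
                goto free_sess;
        if (setup_backend(sess))
                goto free_ops;
        return sess;

free_ops:
        kfree(sess->ops);       /* the step that was missing above */
free_sess:
        kfree(sess);
        return NULL;
}
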
index e8a240818353bb54e2fdf9bb14cb194081c519c2..5e3295fe404d7cc93aae6354f2578bcbca55ee23 100644 (file)
@@ -161,10 +161,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
 int iscsit_get_tpg(
        struct iscsi_portal_group *tpg)
 {
-       int ret;
-
-       ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
-       return ((ret != 0) || signal_pending(current)) ? -1 : 0;
+       return mutex_lock_interruptible(&tpg->tpg_access_lock);
 }
 
 void iscsit_put_tpg(struct iscsi_portal_group *tpg)
index 75cbde1f7c5b6e34ea7060011c2aca817e4e55f2..4f8d4d459aa4f936a09438cc076fc998a75aa783 100644 (file)
@@ -704,7 +704,7 @@ target_alua_state_check(struct se_cmd *cmd)
 
        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        if (!port)
@@ -2377,7 +2377,7 @@ ssize_t core_alua_store_secondary_write_metadata(
 
 int core_setup_alua(struct se_device *dev)
 {
-       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
                struct t10_alua_lu_gp_member *lu_gp_mem;
 
index ddaf76a4ac2aab3c00e70607a76c90a6cb308389..e7b0430a0575d0403dbb38b0fd4d41df1ccce79d 100644 (file)
@@ -212,10 +212,6 @@ static struct config_group *target_core_register_fabric(
 
        pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
                        " %s\n", tf->tf_group.cg_item.ci_name);
-       /*
-        * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
-        */
-       tf->tf_ops.tf_subsys = tf->tf_subsys;
        tf->tf_fabric = &tf->tf_group.cg_item;
        pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
                        " for %s\n", name);
@@ -291,10 +287,17 @@ static struct configfs_subsystem target_core_fabrics = {
        },
 };
 
-struct configfs_subsystem *target_core_subsystem[] = {
-       &target_core_fabrics,
-       NULL,
-};
+int target_depend_item(struct config_item *item)
+{
+       return configfs_depend_item(&target_core_fabrics, item);
+}
+EXPORT_SYMBOL(target_depend_item);
+
+void target_undepend_item(struct config_item *item)
+{
+       return configfs_undepend_item(&target_core_fabrics, item);
+}
+EXPORT_SYMBOL(target_undepend_item);
 
 /*##############################################################################
 // Start functions called by external Target Fabrics Modules
@@ -467,7 +470,6 @@ int target_register_template(const struct target_core_fabric_ops *fo)
         * struct target_fabric_configfs->tf_cit_tmpl
         */
        tf->tf_module = fo->module;
-       tf->tf_subsys = target_core_subsystem[0];
        snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name);
 
        tf->tf_ops = *fo;
@@ -809,7 +811,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
 {
        int ret;
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return sprintf(page, "Passthrough\n");
 
        spin_lock(&dev->dev_reservation_lock);
@@ -960,7 +962,7 @@ SE_DEV_PR_ATTR_RO(res_pr_type);
 static ssize_t target_core_dev_pr_show_attr_res_type(
                struct se_device *dev, char *page)
 {
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return sprintf(page, "SPC_PASSTHROUGH\n");
        else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
                return sprintf(page, "SPC2_RESERVATIONS\n");
@@ -973,7 +975,7 @@ SE_DEV_PR_ATTR_RO(res_type);
 static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
                struct se_device *dev, char *page)
 {
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        return sprintf(page, "APTPL Bit Status: %s\n",
@@ -988,7 +990,7 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active);
 static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
                struct se_device *dev, char *page)
 {
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1035,7 +1037,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
        u16 port_rpti = 0, tpgt = 0;
        u8 type = 0, scope;
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
        if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
                return 0;
@@ -2870,7 +2872,7 @@ static int __init target_core_init_configfs(void)
 {
        struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
        struct config_group *lu_gp_cg = NULL;
-       struct configfs_subsystem *subsys;
+       struct configfs_subsystem *subsys = &target_core_fabrics;
        struct t10_alua_lu_gp *lu_gp;
        int ret;
 
@@ -2878,7 +2880,6 @@ static int __init target_core_init_configfs(void)
                " Engine: %s on %s/%s on "UTS_RELEASE"\n",
                TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
 
-       subsys = target_core_subsystem[0];
        config_group_init(&subsys->su_group);
        mutex_init(&subsys->su_mutex);
 
@@ -3008,13 +3009,10 @@ out_global:
 
 static void __exit target_core_exit_configfs(void)
 {
-       struct configfs_subsystem *subsys;
        struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
        struct config_item *item;
        int i;
 
-       subsys = target_core_subsystem[0];
-
        lu_gp_cg = &alua_lu_gps_group;
        for (i = 0; lu_gp_cg->default_groups[i]; i++) {
                item = &lu_gp_cg->default_groups[i]->cg_item;
@@ -3045,8 +3043,8 @@ static void __exit target_core_exit_configfs(void)
         * We expect subsys->su_group.default_groups to be released
         * by configfs subsystem provider logic..
         */
-       configfs_unregister_subsystem(subsys);
-       kfree(subsys->su_group.default_groups);
+       configfs_unregister_subsystem(&target_core_fabrics);
+       kfree(target_core_fabrics.su_group.default_groups);
 
        core_alua_free_lu_gp(default_lu_gp);
        default_lu_gp = NULL;
index 7faa6aef9a4d5429cbf1d3810ebb181f7a911beb..ce5f768181ff6593a7afac365214c77b0f0aceab 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/kthread.h>
 #include <linux/in.h>
 #include <linux/export.h>
+#include <asm/unaligned.h>
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <scsi/scsi.h>
@@ -527,7 +528,7 @@ static void core_export_port(
        list_add_tail(&port->sep_list, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);
 
-       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
                tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
                if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
@@ -1603,7 +1604,7 @@ int target_configure_device(struct se_device *dev)
         * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
         * passthrough because this is being provided by the backend LLD.
         */
-       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
                strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
                strncpy(&dev->t10_wwn.model[0],
                        dev->transport->inquiry_prod, 16);
@@ -1707,3 +1708,76 @@ void core_dev_release_virtual_lun0(void)
                target_free_device(g_lun0_dev);
        core_delete_hba(hba);
 }
+
+/*
+ * Common CDB parsing for kernel and user passthrough.
+ */
+sense_reason_t
+passthrough_parse_cdb(struct se_cmd *cmd,
+       sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
+{
+       unsigned char *cdb = cmd->t_task_cdb;
+
+       /*
+        * Clear a lun set in the cdb if the initiator talking to us spoke
+        * an old standards version, as we can't assume the underlying device
+        * won't choke up on it.
+        */
+       switch (cdb[0]) {
+       case READ_10: /* SBC - RDProtect */
+       case READ_12: /* SBC - RDProtect */
+       case READ_16: /* SBC - RDProtect */
+       case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+       case VERIFY: /* SBC - VRProtect */
+       case VERIFY_16: /* SBC - VRProtect */
+       case WRITE_VERIFY: /* SBC - VRProtect */
+       case WRITE_VERIFY_12: /* SBC - VRProtect */
+       case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
+               break;
+       default:
+               cdb[1] &= 0x1f; /* clear logical unit number */
+               break;
+       }
+
+       /*
+        * For REPORT LUNS we always need to emulate the response; for everything
+        * else, pass it up.
+        */
+       if (cdb[0] == REPORT_LUNS) {
+               cmd->execute_cmd = spc_emulate_report_luns;
+               return TCM_NO_SENSE;
+       }
+
+       /* Set DATA_CDB flag for ops that should have it */
+       switch (cdb[0]) {
+       case READ_6:
+       case READ_10:
+       case READ_12:
+       case READ_16:
+       case WRITE_6:
+       case WRITE_10:
+       case WRITE_12:
+       case WRITE_16:
+       case WRITE_VERIFY:
+       case WRITE_VERIFY_12:
+       case 0x8e: /* WRITE_VERIFY_16 */
+       case COMPARE_AND_WRITE:
+       case XDWRITEREAD_10:
+               cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+               break;
+       case VARIABLE_LENGTH_CMD:
+               switch (get_unaligned_be16(&cdb[8])) {
+               case READ_32:
+               case WRITE_32:
+               case 0x0c: /* WRITE_VERIFY_32 */
+               case XDWRITEREAD_32:
+                       cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+                       break;
+               }
+       }
+
+       cmd->execute_cmd = exec_cmd;
+
+       return TCM_NO_SENSE;
+}
+EXPORT_SYMBOL(passthrough_parse_cdb);
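
Both passthrough backends later in this diff reduce their parse_cdb hook to a one-line call into this helper, passing only their execute callback (pscsi_execute_cmd and tcmu_pass_op respectively). A hypothetical backend would wire it up the same way, assuming the usual target_core headers:

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

static sense_reason_t my_exec_cmd(struct se_cmd *cmd)
{
        /* hand the unmodified CDB to the underlying device */
        return TCM_NO_SENSE;
}

static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
{
        return passthrough_parse_cdb(cmd, my_exec_cmd);
}
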
index f7e6e51aed3614aa35e8a58c0bb1d2cfcc462141..3f27bfd816d87201c5f3cec3ad7857ead488191b 100644 (file)
@@ -958,7 +958,6 @@ static struct se_subsystem_api fileio_template = {
        .inquiry_prod           = "FILEIO",
        .inquiry_rev            = FD_VERSION,
        .owner                  = THIS_MODULE,
-       .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba             = fd_attach_hba,
        .detach_hba             = fd_detach_hba,
        .alloc_device           = fd_alloc_device,
index 1b7947c2510fc8c65872127738c83ccbb34cf6a3..8c965683789f9e141233edac76593e156a58bd2f 100644 (file)
@@ -904,7 +904,6 @@ static struct se_subsystem_api iblock_template = {
        .inquiry_prod           = "IBLOCK",
        .inquiry_rev            = IBLOCK_VERSION,
        .owner                  = THIS_MODULE,
-       .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba             = iblock_attach_hba,
        .detach_hba             = iblock_detach_hba,
        .alloc_device           = iblock_alloc_device,
index 874a9bc988d807a615a9ed7516041bfe54176c4b..68bd7f5d9f73cf6feacd2dfefb951db99dd21c4f 100644 (file)
@@ -4,9 +4,6 @@
 /* target_core_alua.c */
 extern struct t10_alua_lu_gp *default_lu_gp;
 
-/* target_core_configfs.c */
-extern struct configfs_subsystem *target_core_subsystem[];
-
 /* target_core_device.c */
 extern struct mutex g_device_mutex;
 extern struct list_head g_device_list;
index c1aa9655e96ec13881bdee2040254887fde0e903..a15411c79ae99649041c216439e938f52a7c071a 100644 (file)
@@ -1367,41 +1367,26 @@ void core_scsi3_free_all_registrations(
 
 static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
 {
-       return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &tpg->tpg_group.cg_item);
+       return target_depend_item(&tpg->tpg_group.cg_item);
 }
 
 static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
 {
-       configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &tpg->tpg_group.cg_item);
-
+       target_undepend_item(&tpg->tpg_group.cg_item);
        atomic_dec_mb(&tpg->tpg_pr_ref_count);
 }
 
 static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
 {
-       struct se_portal_group *tpg = nacl->se_tpg;
-
        if (nacl->dynamic_node_acl)
                return 0;
-
-       return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &nacl->acl_group.cg_item);
+       return target_depend_item(&nacl->acl_group.cg_item);
 }
 
 static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
 {
-       struct se_portal_group *tpg = nacl->se_tpg;
-
-       if (nacl->dynamic_node_acl) {
-               atomic_dec_mb(&nacl->acl_pr_ref_count);
-               return;
-       }
-
-       configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &nacl->acl_group.cg_item);
-
+       if (!nacl->dynamic_node_acl)
+               target_undepend_item(&nacl->acl_group.cg_item);
        atomic_dec_mb(&nacl->acl_pr_ref_count);
 }
 
@@ -1419,8 +1404,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
        nacl = lun_acl->se_lun_nacl;
        tpg = nacl->se_tpg;
 
-       return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &lun_acl->se_lun_group.cg_item);
+       return target_depend_item(&lun_acl->se_lun_group.cg_item);
 }
 
 static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
@@ -1438,9 +1422,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
        nacl = lun_acl->se_lun_nacl;
        tpg = nacl->se_tpg;
 
-       configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &lun_acl->se_lun_group.cg_item);
-
+       target_undepend_item(&lun_acl->se_lun_group.cg_item);
        atomic_dec_mb(&se_deve->pr_ref_count);
 }
 
@@ -4111,7 +4093,7 @@ target_check_reservation(struct se_cmd *cmd)
                return 0;
        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        spin_lock(&dev->dev_reservation_lock);
index f6c954c4635f5dab27ac24ea117dd284561c836a..ecc5eaef13d6c38956a213a784ae66e1b4e0411b 100644 (file)
@@ -521,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev)
                                        " pdv_host_id: %d\n", pdv->pdv_host_id);
                                return -EINVAL;
                        }
+                       pdv->pdv_lld_host = sh;
                }
        } else {
                if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
@@ -603,6 +604,8 @@ static void pscsi_free_device(struct se_device *dev)
                if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
                    (phv->phv_lld_host != NULL))
                        scsi_host_put(phv->phv_lld_host);
+               else if (pdv->pdv_lld_host)
+                       scsi_host_put(pdv->pdv_lld_host);
 
                if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
                        scsi_device_put(sd);
@@ -970,64 +973,13 @@ fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
-/*
- * Clear a lun set in the cdb if the initiator talking to use spoke
- * and old standards version, as we can't assume the underlying device
- * won't choke up on it.
- */
-static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
-{
-       switch (cdb[0]) {
-       case READ_10: /* SBC - RDProtect */
-       case READ_12: /* SBC - RDProtect */
-       case READ_16: /* SBC - RDProtect */
-       case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
-       case VERIFY: /* SBC - VRProtect */
-       case VERIFY_16: /* SBC - VRProtect */
-       case WRITE_VERIFY: /* SBC - VRProtect */
-       case WRITE_VERIFY_12: /* SBC - VRProtect */
-       case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
-               break;
-       default:
-               cdb[1] &= 0x1f; /* clear logical unit number */
-               break;
-       }
-}
-
 static sense_reason_t
 pscsi_parse_cdb(struct se_cmd *cmd)
 {
-       unsigned char *cdb = cmd->t_task_cdb;
-
        if (cmd->se_cmd_flags & SCF_BIDI)
                return TCM_UNSUPPORTED_SCSI_OPCODE;
 
-       pscsi_clear_cdb_lun(cdb);
-
-       /*
-        * For REPORT LUNS we always need to emulate the response, for everything
-        * else the default for pSCSI is to pass the command to the underlying
-        * LLD / physical hardware.
-        */
-       switch (cdb[0]) {
-       case REPORT_LUNS:
-               cmd->execute_cmd = spc_emulate_report_luns;
-               return 0;
-       case READ_6:
-       case READ_10:
-       case READ_12:
-       case READ_16:
-       case WRITE_6:
-       case WRITE_10:
-       case WRITE_12:
-       case WRITE_16:
-       case WRITE_VERIFY:
-               cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-               /* FALLTHROUGH*/
-       default:
-               cmd->execute_cmd = pscsi_execute_cmd;
-               return 0;
-       }
+       return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
 }
 
 static sense_reason_t
@@ -1189,7 +1141,7 @@ static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
 static struct se_subsystem_api pscsi_template = {
        .name                   = "pscsi",
        .owner                  = THIS_MODULE,
-       .transport_type         = TRANSPORT_PLUGIN_PHBA_PDEV,
+       .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
        .attach_hba             = pscsi_attach_hba,
        .detach_hba             = pscsi_detach_hba,
        .pmode_enable_hba       = pscsi_pmode_enable_hba,
index 1bd757dff8eee3806cae1d3da6ce33804e5deb17..820d3052b775caf438912d703402ebb4172d7f31 100644 (file)
@@ -45,6 +45,7 @@ struct pscsi_dev_virt {
        int     pdv_lun_id;
        struct block_device *pdv_bd;
        struct scsi_device *pdv_sd;
+       struct Scsi_Host *pdv_lld_host;
 } ____cacheline_aligned;
 
 typedef enum phv_modes {
index a263bf5fab8d4538384f557aef1a3df7df3d9792..d16489b6a1a4767ef4a8ba9445998a7bff2845d8 100644 (file)
@@ -733,7 +733,6 @@ static struct se_subsystem_api rd_mcp_template = {
        .name                   = "rd_mcp",
        .inquiry_prod           = "RAMDISK-MCP",
        .inquiry_rev            = RD_MCP_VERSION,
-       .transport_type         = TRANSPORT_PLUGIN_VHBA_VDEV,
        .attach_hba             = rd_attach_hba,
        .detach_hba             = rd_detach_hba,
        .alloc_device           = rd_alloc_device,
index 8855781ac653026aa0b513340b150e5f33f05f28..733824e3825f4845e9035b9f00a7d553b9d59d6e 100644 (file)
@@ -568,7 +568,7 @@ sbc_compare_and_write(struct se_cmd *cmd)
         * comparison using SGLs at cmd->t_bidi_data_sg..
         */
        rc = down_interruptible(&dev->caw_sem);
-       if ((rc != 0) || signal_pending(current)) {
+       if (rc != 0) {
                cmd->transport_complete_callback = NULL;
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
index 3fe5cb240b6f6a5b4c8a3fb42396b77dd5701f74..675f2d9d1f14c69142d63179afa38e5b74255243 100644 (file)
@@ -1196,7 +1196,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
         * Check if SAM Task Attribute emulation is enabled for this
         * struct se_device storage object
         */
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        if (cmd->sam_task_attr == TCM_ACA_TAG) {
@@ -1770,7 +1770,7 @@ static int target_write_prot_action(struct se_cmd *cmd)
                                                   sectors, 0, NULL, 0);
                if (unlikely(cmd->pi_err)) {
                        spin_lock_irq(&cmd->t_state_lock);
-                       cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT;
+                       cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
                        spin_unlock_irq(&cmd->t_state_lock);
                        transport_generic_request_failure(cmd, cmd->pi_err);
                        return -1;
@@ -1787,7 +1787,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return false;
 
        /*
@@ -1868,7 +1868,7 @@ void target_execute_cmd(struct se_cmd *cmd)
 
        if (target_handle_task_attr(cmd)) {
                spin_lock_irq(&cmd->t_state_lock);
-               cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT;
+               cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
                spin_unlock_irq(&cmd->t_state_lock);
                return;
        }
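
Both fixes above correct an operator-precedence bug: '~' binds tighter than '|', so the old mask cleared only CMD_T_BUSY and left CMD_T_SENT set instead of clearing both flags. A standalone demonstration (bit values illustrative):

#include <stdio.h>

#define CMD_T_BUSY      (1 << 0)        /* illustrative bit values */
#define CMD_T_SENT      (1 << 1)

int main(void)
{
        unsigned int state = CMD_T_BUSY | CMD_T_SENT;
        unsigned int buggy = state & (~CMD_T_BUSY | CMD_T_SENT);
        unsigned int fixed = state & ~(CMD_T_BUSY | CMD_T_SENT);

        printf("buggy=%#x fixed=%#x\n", buggy, fixed);  /* buggy=0x2 fixed=0 */
        return 0;
}
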
@@ -1912,7 +1912,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return;
 
        if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
@@ -1957,8 +1957,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
        case DMA_TO_DEVICE:
                if (cmd->se_cmd_flags & SCF_BIDI) {
                        ret = cmd->se_tfo->queue_data_in(cmd);
-                       if (ret < 0)
-                               break;
+                       break;
                }
                /* Fall through for DMA_TO_DEVICE */
        case DMA_NONE:
index dbc872a6c9816e95211f5b93bb9f623233d249ba..07d2996d8c1fe922334ee57dfe4d27fd9d7685f8 100644 (file)
@@ -71,13 +71,6 @@ struct tcmu_hba {
        u32 host_id;
 };
 
-/* User wants all cmds or just some */
-enum passthru_level {
-       TCMU_PASS_ALL = 0,
-       TCMU_PASS_IO,
-       TCMU_PASS_INVALID,
-};
-
 #define TCMU_CONFIG_LEN 256
 
 struct tcmu_dev {
@@ -89,7 +82,6 @@ struct tcmu_dev {
 #define TCMU_DEV_BIT_OPEN 0
 #define TCMU_DEV_BIT_BROKEN 1
        unsigned long flags;
-       enum passthru_level pass_level;
 
        struct uio_info uio_info;
 
@@ -683,8 +675,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
        setup_timer(&udev->timeout, tcmu_device_timedout,
                (unsigned long)udev);
 
-       udev->pass_level = TCMU_PASS_ALL;
-
        return &udev->se_dev;
 }
 
@@ -948,13 +938,13 @@ static void tcmu_free_device(struct se_device *dev)
 }
 
 enum {
-       Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level,
+       Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
 };
 
 static match_table_t tokens = {
        {Opt_dev_config, "dev_config=%s"},
        {Opt_dev_size, "dev_size=%u"},
-       {Opt_pass_level, "pass_level=%u"},
+       {Opt_hw_block_size, "hw_block_size=%u"},
        {Opt_err, NULL}
 };
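
The pass_level option is retired in favour of hw_block_size, parsed through the same match_table_t machinery shown here. For reference, a minimal sketch of how such a token table drives option parsing (names illustrative, not TCMU's own):

#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/kernel.h>

enum { Opt_size, Opt_err };

static match_table_t my_tokens = {
        {Opt_size, "size=%u"},
        {Opt_err, NULL}
};

static int parse_one(char *ptr, unsigned long *out)
{
        substring_t args[MAX_OPT_ARGS];
        char *arg_p;
        int ret;

        switch (match_token(ptr, my_tokens, args)) {
        case Opt_size:
                arg_p = match_strdup(&args[0]);
                if (!arg_p)
                        return -ENOMEM;
                ret = kstrtoul(arg_p, 0, out);
                kfree(arg_p);
                return ret;
        default:
                return -EINVAL;
        }
}
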
 
@@ -965,7 +955,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
        char *orig, *ptr, *opts, *arg_p;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
-       int arg;
+       unsigned long tmp_ul;
 
        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
@@ -998,15 +988,23 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
                        if (ret < 0)
                                pr_err("kstrtoul() failed for dev_size=\n");
                        break;
-               case Opt_pass_level:
-                       match_int(args, &arg);
-                       if (arg >= TCMU_PASS_INVALID) {
-                               pr_warn("TCMU: Invalid pass_level: %d\n", arg);
+               case Opt_hw_block_size:
+                       arg_p = match_strdup(&args[0]);
+                       if (!arg_p) {
+                               ret = -ENOMEM;
                                break;
                        }
-
-                       pr_debug("TCMU: Setting pass_level to %d\n", arg);
-                       udev->pass_level = arg;
+                       ret = kstrtoul(arg_p, 0, &tmp_ul);
+                       kfree(arg_p);
+                       if (ret < 0) {
+                               pr_err("kstrtoul() failed for hw_block_size=\n");
+                               break;
+                       }
+                       if (!tmp_ul) {
+                               pr_err("hw_block_size must be nonzero\n");
+                               break;
+                       }
+                       dev->dev_attrib.hw_block_size = tmp_ul;
                        break;
                default:
                        break;
@@ -1024,8 +1022,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
 
        bl = sprintf(b + bl, "Config: %s ",
                     udev->dev_config[0] ? udev->dev_config : "NULL");
-       bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n",
-                     udev->dev_size, udev->pass_level);
+       bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);
 
        return bl;
 }
@@ -1038,20 +1035,6 @@ static sector_t tcmu_get_blocks(struct se_device *dev)
                       dev->dev_attrib.block_size);
 }
 
-static sense_reason_t
-tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents,
-               enum dma_data_direction data_direction)
-{
-       int ret;
-
-       ret = tcmu_queue_cmd(se_cmd);
-
-       if (ret != 0)
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-       else
-               return TCM_NO_SENSE;
-}
-
 static sense_reason_t
 tcmu_pass_op(struct se_cmd *se_cmd)
 {
@@ -1063,91 +1046,29 @@ tcmu_pass_op(struct se_cmd *se_cmd)
                return TCM_NO_SENSE;
 }
 
-static struct sbc_ops tcmu_sbc_ops = {
-       .execute_rw = tcmu_execute_rw,
-       .execute_sync_cache     = tcmu_pass_op,
-       .execute_write_same     = tcmu_pass_op,
-       .execute_write_same_unmap = tcmu_pass_op,
-       .execute_unmap          = tcmu_pass_op,
-};
-
 static sense_reason_t
 tcmu_parse_cdb(struct se_cmd *cmd)
 {
-       unsigned char *cdb = cmd->t_task_cdb;
-       struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev);
-       sense_reason_t ret;
-
-       switch (udev->pass_level) {
-       case TCMU_PASS_ALL:
-               /* We're just like pscsi, then */
-               /*
-                * For REPORT LUNS we always need to emulate the response, for everything
-                * else, pass it up.
-                */
-               switch (cdb[0]) {
-               case REPORT_LUNS:
-                       cmd->execute_cmd = spc_emulate_report_luns;
-                       break;
-               case READ_6:
-               case READ_10:
-               case READ_12:
-               case READ_16:
-               case WRITE_6:
-               case WRITE_10:
-               case WRITE_12:
-               case WRITE_16:
-               case WRITE_VERIFY:
-                       cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-                       /* FALLTHROUGH */
-               default:
-                       cmd->execute_cmd = tcmu_pass_op;
-               }
-               ret = TCM_NO_SENSE;
-               break;
-       case TCMU_PASS_IO:
-               ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops);
-               break;
-       default:
-               pr_err("Unknown tcm-user pass level %d\n", udev->pass_level);
-               ret = TCM_CHECK_CONDITION_ABORT_CMD;
-       }
-
-       return ret;
+       return passthrough_parse_cdb(cmd, tcmu_pass_op);
 }
 
-DEF_TB_DEFAULT_ATTRIBS(tcmu);
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type);
+TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type);
+
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size);
+TB_DEV_ATTR_RO(tcmu, hw_block_size);
+
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors);
+TB_DEV_ATTR_RO(tcmu, hw_max_sectors);
+
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth);
+TB_DEV_ATTR_RO(tcmu, hw_queue_depth);
 
 static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
-       &tcmu_dev_attrib_emulate_model_alias.attr,
-       &tcmu_dev_attrib_emulate_dpo.attr,
-       &tcmu_dev_attrib_emulate_fua_write.attr,
-       &tcmu_dev_attrib_emulate_fua_read.attr,
-       &tcmu_dev_attrib_emulate_write_cache.attr,
-       &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
-       &tcmu_dev_attrib_emulate_tas.attr,
-       &tcmu_dev_attrib_emulate_tpu.attr,
-       &tcmu_dev_attrib_emulate_tpws.attr,
-       &tcmu_dev_attrib_emulate_caw.attr,
-       &tcmu_dev_attrib_emulate_3pc.attr,
-       &tcmu_dev_attrib_pi_prot_type.attr,
        &tcmu_dev_attrib_hw_pi_prot_type.attr,
-       &tcmu_dev_attrib_pi_prot_format.attr,
-       &tcmu_dev_attrib_enforce_pr_isids.attr,
-       &tcmu_dev_attrib_is_nonrot.attr,
-       &tcmu_dev_attrib_emulate_rest_reord.attr,
-       &tcmu_dev_attrib_force_pr_aptpl.attr,
        &tcmu_dev_attrib_hw_block_size.attr,
-       &tcmu_dev_attrib_block_size.attr,
        &tcmu_dev_attrib_hw_max_sectors.attr,
-       &tcmu_dev_attrib_optimal_sectors.attr,
        &tcmu_dev_attrib_hw_queue_depth.attr,
-       &tcmu_dev_attrib_queue_depth.attr,
-       &tcmu_dev_attrib_max_unmap_lba_count.attr,
-       &tcmu_dev_attrib_max_unmap_block_desc_count.attr,
-       &tcmu_dev_attrib_unmap_granularity.attr,
-       &tcmu_dev_attrib_unmap_granularity_alignment.attr,
-       &tcmu_dev_attrib_max_write_same_len.attr,
        NULL,
 };
 
@@ -1156,7 +1077,7 @@ static struct se_subsystem_api tcmu_template = {
        .inquiry_prod           = "USER",
        .inquiry_rev            = TCMU_VERSION,
        .owner                  = THIS_MODULE,
-       .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
+       .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
        .attach_hba             = tcmu_attach_hba,
        .detach_hba             = tcmu_detach_hba,
        .alloc_device           = tcmu_alloc_device,
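
The pass_level machinery above collapses into a single call: with TRANSPORT_FLAG_PASSTHROUGH set, the core's passthrough_parse_cdb() helper handles REPORT LUNS emulation and flags data-carrying CDBs, leaving the backend to supply one execution hook. A minimal sketch of what a passthrough backend now needs, assuming only the helper's signature as used above:

/* Minimal sketch, assuming passthrough_parse_cdb() as used above. */
static sense_reason_t my_pass_op(struct se_cmd *se_cmd)
{
	/* hand se_cmd off to the backend transport here */
	return TCM_NO_SENSE;
}

static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
{
	/* core emulates REPORT LUNS and sets SCF_SCSI_DATA_CDB for us */
	return passthrough_parse_cdb(cmd, my_pass_op);
}
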
index a600ff15dcfd1674140170b0808d494db64333ea..8fd680ac941bde49cd7803134da5beb77c7092b0 100644 (file)
@@ -58,7 +58,6 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
                                        bool src)
 {
        struct se_device *se_dev;
-       struct configfs_subsystem *subsys = target_core_subsystem[0];
        unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
        int rc;
 
@@ -90,8 +89,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
                                " se_dev\n", xop->src_dev);
                }
 
-               rc = configfs_depend_item(subsys,
-                               &se_dev->dev_group.cg_item);
+               rc = target_depend_item(&se_dev->dev_group.cg_item);
                if (rc != 0) {
                        pr_err("configfs_depend_item attempt failed:"
                                " %d for se_dev: %p\n", rc, se_dev);
@@ -99,8 +97,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
                        return rc;
                }
 
-               pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p"
-                       " se_dev->se_dev_group: %p\n", subsys, se_dev,
+               pr_debug("Called configfs_depend_item for se_dev: %p"
+                       " se_dev->se_dev_group: %p\n", se_dev,
                        &se_dev->dev_group);
 
                mutex_unlock(&g_device_mutex);
@@ -373,7 +371,6 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
 
 static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
 {
-       struct configfs_subsystem *subsys = target_core_subsystem[0];
        struct se_device *remote_dev;
 
        if (xop->op_origin == XCOL_SOURCE_RECV_OP)
@@ -381,11 +378,11 @@ static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
        else
                remote_dev = xop->src_dev;
 
-       pr_debug("Calling configfs_undepend_item for subsys: %p"
+       pr_debug("Calling configfs_undepend_item for"
                  " remote_dev: %p remote_dev->dev_group: %p\n",
-                 subsys, remote_dev, &remote_dev->dev_group.cg_item);
+                 remote_dev, &remote_dev->dev_group.cg_item);
 
-       configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item);
+       target_undepend_item(&remote_dev->dev_group.cg_item);
 }
 
 static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
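
Both hunks in this file swap configfs_depend_item()/configfs_undepend_item() plus an explicit subsystem pointer for target_depend_item()/target_undepend_item(). A hedged sketch of what such wrappers look like, assuming they simply capture the target core's own configfs subsystem (the real definitions land elsewhere in this merge; the subsystem name below is a placeholder):

/* Hedged sketch; target_core_subsys is an illustrative name. */
int target_depend_item(struct config_item *item)
{
	return configfs_depend_item(&target_core_subsys, item);
}

void target_undepend_item(struct config_item *item)
{
	configfs_undepend_item(&target_core_subsys, item);
}
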
index 04d9e23d1ee16a508e0e0b1331407bcf6b20a94b..358323c83b4f340dec1a915ef923145fb972d933 100644 (file)
@@ -174,13 +174,13 @@ struct mips_ejtag_fdc_tty {
 static inline void mips_ejtag_fdc_write(struct mips_ejtag_fdc_tty *priv,
                                        unsigned int offs, unsigned int data)
 {
-       iowrite32(data, priv->reg + offs);
+       __raw_writel(data, priv->reg + offs);
 }
 
 static inline unsigned int mips_ejtag_fdc_read(struct mips_ejtag_fdc_tty *priv,
                                               unsigned int offs)
 {
-       return ioread32(priv->reg + offs);
+       return __raw_readl(priv->reg + offs);
 }
 
 /* Encoding of byte stream in FDC words */
@@ -347,9 +347,9 @@ static void mips_ejtag_fdc_console_write(struct console *c, const char *s,
                s += inc[word.bytes - 1];
 
                /* Busy wait until there's space in fifo */
-               while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
+               while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
                        ;
-               iowrite32(word.word, regs + REG_FDTX(c->index));
+               __raw_writel(word.word, regs + REG_FDTX(c->index));
        }
 out:
        local_irq_restore(flags);
@@ -1227,7 +1227,7 @@ static int kgdbfdc_read_char(void)
 
                /* Read next word from KGDB channel */
                do {
-                       stat = ioread32(regs + REG_FDSTAT);
+                       stat = __raw_readl(regs + REG_FDSTAT);
 
                        /* No data waiting? */
                        if (stat & REG_FDSTAT_RXE)
@@ -1236,7 +1236,7 @@ static int kgdbfdc_read_char(void)
                        /* Read next word */
                        channel = (stat & REG_FDSTAT_RXCHAN) >>
                                        REG_FDSTAT_RXCHAN_SHIFT;
-                       data = ioread32(regs + REG_FDRX);
+                       data = __raw_readl(regs + REG_FDRX);
                } while (channel != CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN);
 
                /* Decode into rbuf */
@@ -1266,9 +1266,10 @@ static void kgdbfdc_push_one(void)
                return;
 
        /* Busy wait until there's space in fifo */
-       while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
+       while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
                ;
-       iowrite32(word.word, regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN));
+       __raw_writel(word.word,
+                    regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN));
 }
 
 /* flush the whole write buffer to the TX FIFO */
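
The ioread32()/iowrite32() pair is defined as little-endian PCI-style MMIO and may byte-swap on a big-endian MIPS kernel; the EJTAG FDC registers are CPU-native, which is presumably why this file moves to the raw accessors. A small sketch of the distinction:

/* Illustrative only: native-endian register access, no swapping. */
static inline u32 fdc_read(void __iomem *base, unsigned int offs)
{
	return __raw_readl(base + offs);	/* no byte swap, no barrier */
}

static inline void fdc_write(void __iomem *base, unsigned int offs, u32 val)
{
	__raw_writel(val, base + offs);		/* no byte swap, no barrier */
}
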
index cc57a3a6b02b348df95c827fd2c770e59ffca155..396344cb011fd1fafab05c3ddeeff1841e13e055 100644 (file)
@@ -162,6 +162,17 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
        return put_user(x, ptr);
 }
 
+static inline int tty_copy_to_user(struct tty_struct *tty,
+                                       void __user *to,
+                                       const void *from,
+                                       unsigned long n)
+{
+       struct n_tty_data *ldata = tty->disc_data;
+
+       tty_audit_add_data(tty, to, n, ldata->icanon);
+       return copy_to_user(to, from, n);
+}
+
 /**
  *     n_tty_kick_worker - start input worker (if required)
  *     @tty: terminal
@@ -2070,8 +2081,8 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
 
        size = N_TTY_BUF_SIZE - tail;
        n = eol - tail;
-       if (n > 4096)
-               n += 4096;
+       if (n > N_TTY_BUF_SIZE)
+               n += N_TTY_BUF_SIZE;
        n += found;
        c = n;
 
@@ -2084,12 +2095,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
                    __func__, eol, found, n, c, size, more);
 
        if (n > size) {
-               ret = copy_to_user(*b, read_buf_addr(ldata, tail), size);
+               ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size);
                if (ret)
                        return -EFAULT;
-               ret = copy_to_user(*b + size, ldata->read_buf, n - size);
+               ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size);
        } else
-               ret = copy_to_user(*b, read_buf_addr(ldata, tail), n);
+               ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n);
 
        if (ret)
                return -EFAULT;
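
Canonical-mode reads may wrap around the N_TTY ring buffer, so a single logical line can be copied in two pieces; routing both pieces through tty_copy_to_user() keeps tty_audit_add_data() in sync with what actually reached user space. A generic sketch of the wrap-around pattern (names are illustrative, not the n_tty code):

/* Generic two-piece ring-buffer copy; assumes tail < size. */
static int ring_copy_to_user(void __user *to, const char *ring,
			     size_t tail, size_t n, size_t size)
{
	size_t first = min(n, size - tail);	/* bytes before the wrap */

	if (copy_to_user(to, ring + tail, first))
		return -EFAULT;
	if (n > first &&
	    copy_to_user((char __user *)to + first, ring, n - first))
		return -EFAULT;
	return 0;
}
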
index 9289999cb7c62bb05b2a4b758fa76d5ce9413316..dce1a23706e86531d3caa86ba4b03c36b03bf3cf 100644 (file)
@@ -562,12 +562,36 @@ static irqreturn_t omap_wake_irq(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
+#ifdef CONFIG_SERIAL_8250_DMA
+static int omap_8250_dma_handle_irq(struct uart_port *port);
+#endif
+
+static irqreturn_t omap8250_irq(int irq, void *dev_id)
+{
+       struct uart_port *port = dev_id;
+       struct uart_8250_port *up = up_to_u8250p(port);
+       unsigned int iir;
+       int ret;
+
+#ifdef CONFIG_SERIAL_8250_DMA
+       if (up->dma) {
+               ret = omap_8250_dma_handle_irq(port);
+               return IRQ_RETVAL(ret);
+       }
+#endif
+
+       serial8250_rpm_get(up);
+       iir = serial_port_in(port, UART_IIR);
+       ret = serial8250_handle_irq(port, iir);
+       serial8250_rpm_put(up);
+
+       return IRQ_RETVAL(ret);
+}
+
 static int omap_8250_startup(struct uart_port *port)
 {
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
+       struct uart_8250_port *up = up_to_u8250p(port);
        struct omap8250_priv *priv = port->private_data;
-
        int ret;
 
        if (priv->wakeirq) {
@@ -580,10 +604,31 @@ static int omap_8250_startup(struct uart_port *port)
 
        pm_runtime_get_sync(port->dev);
 
-       ret = serial8250_do_startup(port);
-       if (ret)
+       up->mcr = 0;
+       serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+
+       serial_out(up, UART_LCR, UART_LCR_WLEN8);
+
+       up->lsr_saved_flags = 0;
+       up->msr_saved_flags = 0;
+
+       if (up->dma) {
+               ret = serial8250_request_dma(up);
+               if (ret) {
+                       dev_warn_ratelimited(port->dev,
+                                            "failed to request DMA\n");
+                       up->dma = NULL;
+               }
+       }
+
+       ret = request_irq(port->irq, omap8250_irq, IRQF_SHARED,
+                         dev_name(port->dev), port);
+       if (ret < 0)
                goto err;
 
+       up->ier = UART_IER_RLSI | UART_IER_RDI;
+       serial_out(up, UART_IER, up->ier);
+
 #ifdef CONFIG_PM
        up->capabilities |= UART_CAP_RPM;
 #endif
@@ -610,8 +655,7 @@ err:
 
 static void omap_8250_shutdown(struct uart_port *port)
 {
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
+       struct uart_8250_port *up = up_to_u8250p(port);
        struct omap8250_priv *priv = port->private_data;
 
        flush_work(&priv->qos_work);
@@ -621,11 +665,24 @@ static void omap_8250_shutdown(struct uart_port *port)
        pm_runtime_get_sync(port->dev);
 
        serial_out(up, UART_OMAP_WER, 0);
-       serial8250_do_shutdown(port);
+
+       up->ier = 0;
+       serial_out(up, UART_IER, 0);
+
+       if (up->dma)
+               serial8250_release_dma(up);
+
+       /*
+        * Disable break condition and FIFOs
+        */
+       if (up->lcr & UART_LCR_SBC)
+               serial_out(up, UART_LCR, up->lcr & ~UART_LCR_SBC);
+       serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
 
        pm_runtime_mark_last_busy(port->dev);
        pm_runtime_put_autosuspend(port->dev);
 
+       free_irq(port->irq, port);
        if (priv->wakeirq)
                free_irq(priv->wakeirq, port);
 }
@@ -974,6 +1031,13 @@ static inline int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
 }
 #endif
 
+static int omap8250_no_handle_irq(struct uart_port *port)
+{
+       /* IRQ has not been requested but handling irq? */
+       WARN_ONCE(1, "Unexpected irq handling before port startup\n");
+       return 0;
+}
+
 static int omap8250_probe(struct platform_device *pdev)
 {
        struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1075,6 +1139,7 @@ static int omap8250_probe(struct platform_device *pdev)
        pm_runtime_get_sync(&pdev->dev);
 
        omap_serial_fill_features_erratas(&up, priv);
+       up.port.handle_irq = omap8250_no_handle_irq;
 #ifdef CONFIG_SERIAL_8250_DMA
        if (pdev->dev.of_node) {
                /*
@@ -1088,7 +1153,6 @@ static int omap8250_probe(struct platform_device *pdev)
                ret = of_property_count_strings(pdev->dev.of_node, "dma-names");
                if (ret == 2) {
                        up.dma = &priv->omap8250_dma;
-                       up.port.handle_irq = omap_8250_dma_handle_irq;
                        priv->omap8250_dma.fn = the_no_dma_filter_fn;
                        priv->omap8250_dma.tx_dma = omap_8250_tx_dma;
                        priv->omap8250_dma.rx_dma = omap_8250_rx_dma;
index 6f5a0720a8c8eead6c23f37c359c516730013cef..763eb20fe3213b6cfda04dc2624bcd1b8638f324 100644 (file)
@@ -1249,20 +1249,19 @@ __acquires(&uap->port.lock)
 
 /*
  * Transmit a character
- * There must be at least one free entry in the TX FIFO to accept the char.
  *
- * Returns true if the FIFO might have space in it afterwards;
- * returns false if the FIFO definitely became full.
+ * Returns true if the character was successfully queued to the FIFO.
+ * Returns false otherwise.
  */
 static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c)
 {
+       if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
+               return false; /* unable to transmit character */
+
        writew(c, uap->port.membase + UART01x_DR);
        uap->port.icount.tx++;
 
-       if (likely(uap->tx_irq_seen > 1))
-               return true;
-
-       return !(readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF);
+       return true;
 }
 
 static bool pl011_tx_chars(struct uart_amba_port *uap)
@@ -1296,7 +1295,8 @@ static bool pl011_tx_chars(struct uart_amba_port *uap)
                return false;
 
        if (uap->port.x_char) {
-               pl011_tx_char(uap, uap->port.x_char);
+               if (!pl011_tx_char(uap, uap->port.x_char))
+                       goto done;
                uap->port.x_char = 0;
                --count;
        }
index c8cfa06371280af6abfd63bd379ee5c121523ad7..88250395b0ce96486a2dac5e2e9162fb7f4eae43 100644 (file)
@@ -911,6 +911,14 @@ static void dma_rx_callback(void *data)
 
        status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
        count = RX_BUF_SIZE - state.residue;
+
+       if (readl(sport->port.membase + USR2) & USR2_IDLE) {
+               /* In condition [3] the SDMA counted up too early */
+               count--;
+
+               writel(USR2_IDLE, sport->port.membase + USR2);
+       }
+
        dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
 
        if (count) {
index fdab715a063119d6e696a8f66ea26d4a1613e983..c0eafa6fd40314086474f5b7cab8f63361c73d64 100644 (file)
 #define DWC3_DGCMD_SET_ENDPOINT_NRDY   0x0c
 #define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK        0x10
 
-#define DWC3_DGCMD_STATUS(n)           (((n) >> 15) & 1)
+#define DWC3_DGCMD_STATUS(n)           (((n) >> 12) & 0x0F)
 #define DWC3_DGCMD_CMDACT              (1 << 10)
 #define DWC3_DGCMD_CMDIOC              (1 << 8)
 
 #define DWC3_DEPCMD_PARAM_SHIFT                16
 #define DWC3_DEPCMD_PARAM(x)           ((x) << DWC3_DEPCMD_PARAM_SHIFT)
 #define DWC3_DEPCMD_GET_RSC_IDX(x)     (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
-#define DWC3_DEPCMD_STATUS(x)          (((x) >> 15) & 1)
+#define DWC3_DEPCMD_STATUS(x)          (((x) >> 12) & 0x0F)
 #define DWC3_DEPCMD_HIPRI_FORCERM      (1 << 11)
 #define DWC3_DEPCMD_CMDACT             (1 << 10)
 #define DWC3_DEPCMD_CMDIOC             (1 << 8)
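
The status macros change from a single pass/fail bit at position 15 to a 4-bit field in bits 15:12, so completion paths can report which status code the controller returned rather than a boolean. A hedged usage sketch:

/* Illustrative helper; 0 is assumed to mean success here. */
static int dwc3_depcmd_status(struct dwc3 *dwc, u32 ep)
{
	u32 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));

	return DWC3_DEPCMD_STATUS(reg);	/* 4-bit status, bits 15:12 */
}
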
index 6bdb5706904497ca9eccb7fd5d979c67824d8600..3507f880eb74294c76ddbc43c3aa153528478f53 100644 (file)
@@ -315,7 +315,6 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
                                return ret;
                        }
 
-                       set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
                        return len;
                }
                break;
@@ -847,7 +846,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                                ret = ep->status;
                                if (io_data->read && ret > 0) {
                                        ret = copy_to_iter(data, ret, &io_data->data);
-                                       if (unlikely(iov_iter_count(&io_data->data)))
+                                       if (!ret)
                                                ret = -EFAULT;
                                }
                        }
@@ -1463,8 +1462,7 @@ static void ffs_data_clear(struct ffs_data *ffs)
 {
        ENTER();
 
-       if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
-               ffs_closed(ffs);
+       ffs_closed(ffs);
 
        BUG_ON(ffs->gadget);
 
@@ -3422,9 +3420,13 @@ static int ffs_ready(struct ffs_data *ffs)
        ffs_obj->desc_ready = true;
        ffs_obj->ffs_data = ffs;
 
-       if (ffs_obj->ffs_ready_callback)
+       if (ffs_obj->ffs_ready_callback) {
                ret = ffs_obj->ffs_ready_callback(ffs);
+               if (ret)
+                       goto done;
+       }
 
+       set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
 done:
        ffs_dev_unlock();
        return ret;
@@ -3443,7 +3445,8 @@ static void ffs_closed(struct ffs_data *ffs)
 
        ffs_obj->desc_ready = false;
 
-       if (ffs_obj->ffs_closed_callback)
+       if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
+           ffs_obj->ffs_closed_callback)
                ffs_obj->ffs_closed_callback(ffs);
 
        if (!ffs_obj->opts || ffs_obj->opts->no_configfs
index 259b656c0b3ec7bde9e119488f46ded351bb7300..6316aa5b1c4947a6df2e08b4c45856dc77b94374 100644 (file)
@@ -973,7 +973,13 @@ static ssize_t f_midi_opts_id_show(struct f_midi_opts *opts, char *page)
        int result;
 
        mutex_lock(&opts->lock);
-       result = strlcpy(page, opts->id, PAGE_SIZE);
+       if (opts->id) {
+               result = strlcpy(page, opts->id, PAGE_SIZE);
+       } else {
+               page[0] = 0;
+               result = 0;
+       }
+
        mutex_unlock(&opts->lock);
 
        return result;
index 9719abfb61455ca91ec5d1721e53622d4b76f1ef..7856b3394494b7d4250637277dd1f42f45d7a1ea 100644 (file)
@@ -588,7 +588,10 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 
        if (intf == 1) {
                if (alt == 1) {
-                       config_ep_by_speed(cdev->gadget, f, out_ep);
+                       err = config_ep_by_speed(cdev->gadget, f, out_ep);
+                       if (err)
+                               return err;
+
                        usb_ep_enable(out_ep);
                        out_ep->driver_data = audio;
                        audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
index 7b9ef7e257d236dd442226203301a59bbd59ef47..e821931c965cd9203a8011358ffeb16844dc7eed 100644 (file)
@@ -304,8 +304,10 @@ static int functionfs_ready_callback(struct ffs_data *ffs)
        gfs_registered = true;
 
        ret = usb_composite_probe(&gfs_driver);
-       if (unlikely(ret < 0))
+       if (unlikely(ret < 0)) {
+               ++missing_funcs;
                gfs_registered = false;
+       }
        
        return ret;
 }
index b808951491ccbfcdd949d8e78f7c1cc2b4c55f47..99fd9a5667dfd4997092d982c0beae28b578a17c 100644 (file)
@@ -1487,7 +1487,7 @@ static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on)
 
        dprintk(DEBUG_NORMAL, "%s()\n", __func__);
 
-       s3c2410_udc_set_pullup(udc, is_on ? 0 : 1);
+       s3c2410_udc_set_pullup(udc, is_on);
        return 0;
 }
 
index ec8ac16748547a2ac87bf9aa225ed0a36c0bf7df..36bf089b708fe5219258d46305719b7a999a23f6 100644 (file)
@@ -3682,18 +3682,21 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        unsigned long flags;
-       int ret;
+       int ret, slot_id;
        struct xhci_command *command;
 
        command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
        if (!command)
                return 0;
 
+       /* xhci->slot_id and xhci->addr_dev are not thread-safe */
+       mutex_lock(&xhci->mutex);
        spin_lock_irqsave(&xhci->lock, flags);
        command->completion = &xhci->addr_dev;
        ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
        if (ret) {
                spin_unlock_irqrestore(&xhci->lock, flags);
+               mutex_unlock(&xhci->mutex);
                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
                kfree(command);
                return 0;
@@ -3702,8 +3705,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
        spin_unlock_irqrestore(&xhci->lock, flags);
 
        wait_for_completion(command->completion);
+       slot_id = xhci->slot_id;
+       mutex_unlock(&xhci->mutex);
 
-       if (!xhci->slot_id || command->status != COMP_SUCCESS) {
+       if (!slot_id || command->status != COMP_SUCCESS) {
                xhci_err(xhci, "Error while assigning device slot ID\n");
                xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
                                HCS_MAX_SLOTS(
@@ -3728,11 +3733,11 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
         * xhci_discover_or_reset_device(), which may be called as part of
         * mass storage driver error handling.
         */
-       if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
+       if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
                xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
                goto disable_slot;
        }
-       udev->slot_id = xhci->slot_id;
+       udev->slot_id = slot_id;
 
 #ifndef CONFIG_USB_DEFAULT_PERSIST
        /*
@@ -3778,12 +3783,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
        struct xhci_slot_ctx *slot_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        u64 temp_64;
-       struct xhci_command *command;
+       struct xhci_command *command = NULL;
+
+       mutex_lock(&xhci->mutex);
 
        if (!udev->slot_id) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_address,
                                "Bad Slot ID %d", udev->slot_id);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        virt_dev = xhci->devs[udev->slot_id];
@@ -3796,7 +3804,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
                 */
                xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
                        udev->slot_id);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        if (setup == SETUP_CONTEXT_ONLY) {
@@ -3804,13 +3813,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
                if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
                    SLOT_STATE_DEFAULT) {
                        xhci_dbg(xhci, "Slot already in default state\n");
-                       return 0;
+                       goto out;
                }
        }
 
        command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
-       if (!command)
-               return -ENOMEM;
+       if (!command) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
        command->in_ctx = virt_dev->in_ctx;
        command->completion = &xhci->addr_dev;
@@ -3820,8 +3831,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
        if (!ctrl_ctx) {
                xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
                                __func__);
-               kfree(command);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
        /*
         * If this is the first Set Address since device plug-in or
@@ -3848,8 +3859,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg_trace(xhci, trace_xhci_dbg_address,
                                "FIXME: allocate a command ring segment");
-               kfree(command);
-               return ret;
+               goto out;
        }
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3896,10 +3906,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
                ret = -EINVAL;
                break;
        }
-       if (ret) {
-               kfree(command);
-               return ret;
-       }
+       if (ret)
+               goto out;
        temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        xhci_dbg_trace(xhci, trace_xhci_dbg_address,
                        "Op regs DCBAA ptr = %#016llx", temp_64);
@@ -3932,8 +3940,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
        xhci_dbg_trace(xhci, trace_xhci_dbg_address,
                       "Internal device address = %d",
                       le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
+out:
+       mutex_unlock(&xhci->mutex);
        kfree(command);
-       return 0;
+       return ret;
 }
 
 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
@@ -4855,6 +4865,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                return 0;
        }
 
+       mutex_init(&xhci->mutex);
        xhci->cap_regs = hcd->regs;
        xhci->op_regs = hcd->regs +
                HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
@@ -5011,4 +5022,12 @@ static int __init xhci_hcd_init(void)
        BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
        return 0;
 }
+
+/*
+ * If an init function is provided, an exit function must also be provided
+ * to allow module unload.
+ */
+static void __exit xhci_hcd_fini(void) { }
+
 module_init(xhci_hcd_init);
+module_exit(xhci_hcd_fini);
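
The comment states the rule: the module loader treats a module with an init function but no exit function as permanent, so even an empty __exit handler is needed before xhci_hcd can be unloaded. A minimal sketch of the pairing:

#include <linux/module.h>

/* Minimal sketch of the init/exit pairing rule applied above. */
static int __init demo_init(void)
{
	return 0;		/* nothing to set up in this sketch */
}

static void __exit demo_exit(void)
{
	/* empty, like xhci_hcd_fini(); its existence permits rmmod */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
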
index ea75e8ccd3c11d397dc7a6a2ff45e78ae829fd81..6977f8491fa7ced6ea317bf75354a0eb7703670e 100644 (file)
@@ -1497,6 +1497,8 @@ struct xhci_hcd {
        struct list_head        lpm_failed_devs;
 
        /* slot enabling and address device helpers */
+       /* these are not thread-safe, so protect them with a mutex */

+       struct mutex mutex;
        struct completion       addr_dev;
        int slot_id;
        /* For USB 3.0 LPM enable/disable. */
index 3789b08ef67b037781e278c41c0d4b2f2d33e5d9..6dca3d794ced6e1948dd5cbb180e708893f7ba83 100644 (file)
@@ -2021,13 +2021,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        if (musb->ops->quirks)
                musb->io.quirks = musb->ops->quirks;
 
-       /* At least tusb6010 has it's own offsets.. */
-       if (musb->ops->ep_offset)
-               musb->io.ep_offset = musb->ops->ep_offset;
-       if (musb->ops->ep_select)
-               musb->io.ep_select = musb->ops->ep_select;
-
-       /* ..and some devices use indexed offset or flat offset */
+       /* Most devices use indexed offset or flat offset */
        if (musb->io.quirks & MUSB_INDEXED_EP) {
                musb->io.ep_offset = musb_indexed_ep_offset;
                musb->io.ep_select = musb_indexed_ep_select;
@@ -2036,6 +2030,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
                musb->io.ep_select = musb_flat_ep_select;
        }
 
+       /* At least tusb6010 has its own offsets */
+       if (musb->ops->ep_offset)
+               musb->io.ep_offset = musb->ops->ep_offset;
+       if (musb->ops->ep_select)
+               musb->io.ep_select = musb->ops->ep_select;
+
        if (musb->ops->fifo_mode)
                fifo_mode = musb->ops->fifo_mode;
        else
index 7225d526df0446ff26fd69ef65268265737d8c66..03ab0c699f74dd1768f2b769ca823eb7904132ab 100644 (file)
@@ -1179,7 +1179,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
                }
                err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
                                ab8500_usb_link_status_irq,
-                               IRQF_NO_SUSPEND | IRQF_SHARED,
+                               IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
                                "usb-link-status", ab);
                if (err < 0) {
                        dev_err(ab->dev, "request_irq failed for link status irq\n");
@@ -1195,7 +1195,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
                }
                err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
                                ab8500_usb_disconnect_irq,
-                               IRQF_NO_SUSPEND | IRQF_SHARED,
+                               IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
                                "usb-id-fall", ab);
                if (err < 0) {
                        dev_err(ab->dev, "request_irq failed for ID fall irq\n");
@@ -1211,7 +1211,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
                }
                err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
                                ab8500_usb_disconnect_irq,
-                               IRQF_NO_SUSPEND | IRQF_SHARED,
+                               IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
                                "usb-vbus-fall", ab);
                if (err < 0) {
                        dev_err(ab->dev, "request_irq failed for Vbus fall irq\n");
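
All three hunks add IRQF_ONESHOT: when a threaded interrupt is requested with a NULL primary handler, genirq refuses the request unless IRQF_ONESHOT keeps the line masked until the thread completes. A sketch of the pattern (the handler and name below are placeholders):

/* Sketch only; my_thread_fn and "my-device" are illustrative. */
static irqreturn_t my_thread_fn(int irq, void *data)
{
	/* sleepable work runs here, in a kernel thread */
	return IRQ_HANDLED;
}

static int my_request_irq(struct device *dev, int irq, void *priv)
{
	return devm_request_threaded_irq(dev, irq,
					 NULL,	/* no hardirq half */
					 my_thread_fn,
					 IRQF_NO_SUSPEND | IRQF_SHARED |
					 IRQF_ONESHOT,
					 "my-device", priv);
}
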
index 845f658276b106342907c7606a078dbfa47d06d1..2b28443d07b92daed26660f1d80f0bd390937992 100644 (file)
@@ -401,7 +401,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
        dev_set_drvdata(&pdev->dev, tu);
 
        tu->irq = platform_get_irq(pdev, 0);
-       ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, 0,
+       ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
+                                  IRQF_ONESHOT,
                                   "tahvo-vbus", tu);
        if (ret) {
                dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n",
index 8597cf9cfceb7715883738ac8cf1c0380e9a00b1..c0f5c652d272c8959f5b3d59461e1af139d6f7fd 100644 (file)
@@ -611,6 +611,8 @@ struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
 static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
 {
        struct usbhs_pipe *pipe = pkt->pipe;
+       struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+       struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
 
        if (usbhs_pipe_is_busy(pipe))
                return 0;
@@ -624,6 +626,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
        usbhs_pipe_data_sequence(pipe, pkt->sequence);
        pkt->sequence = -1; /* -1 sequence will be ignored */
 
+       if (usbhs_pipe_is_dcp(pipe))
+               usbhsf_fifo_clear(pipe, fifo);
+
        usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
        usbhs_pipe_enable(pipe);
        usbhs_pipe_running(pipe, 1);
@@ -673,7 +678,14 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
                *is_done = 1;
                usbhsf_rx_irq_ctrl(pipe, 0);
                usbhs_pipe_running(pipe, 0);
-               usbhs_pipe_disable(pipe);       /* disable pipe first */
+               /*
+                * In function (gadget) mode the controller may enter the
+                * Control Write status stage at this point, so the driver
+                * must not disable the pipe here; otherwise the controller
+                * cannot complete the status stage.
+                */
+               if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
+                       usbhs_pipe_disable(pipe);       /* disable pipe first */
        }
 
        /*
@@ -1227,15 +1239,21 @@ static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
 {
        char name[16];
 
-       snprintf(name, sizeof(name), "tx%d", channel);
-       fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
-       if (IS_ERR(fifo->tx_chan))
-               fifo->tx_chan = NULL;
-
-       snprintf(name, sizeof(name), "rx%d", channel);
-       fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
-       if (IS_ERR(fifo->rx_chan))
-               fifo->rx_chan = NULL;
+       /*
+        * To avoid complex handling for DnFIFOs, the driver uses each
+        * DnFIFO for one direction only, TX or RX (not bi-directional).
+        * So, the driver uses odd channels for TX, even channels for RX.
+        */
+       snprintf(name, sizeof(name), "ch%d", channel);
+       if (channel & 1) {
+               fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
+               if (IS_ERR(fifo->tx_chan))
+                       fifo->tx_chan = NULL;
+       } else {
+               fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
+               if (IS_ERR(fifo->rx_chan))
+                       fifo->rx_chan = NULL;
+       }
 }
 
 static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo,
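
With the DnFIFO rework each DMA channel serves one direction only, derived from the channel number's parity, and the DT dma-names collapse from "tx%d"/"rx%d" to "ch%d". A small sketch of the lookup convention:

/* Illustrative helper for the "ch%d" + parity convention above. */
static struct dma_chan *usbhsf_request_ch(struct device *dev, int channel)
{
	char name[16];

	snprintf(name, sizeof(name), "ch%d", channel);
	/* odd channels carry TX, even channels carry RX */
	return dma_request_slave_channel_reason(dev, name);
}
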
index 9031750e7404a566d3c08c30e0366c424dcd0b06..ffd739e31bfc193b058628560e86ea6f9b96f375 100644 (file)
@@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
        { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
        { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+       { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
index 8eb68a31cab6c4021617ca555cd58b086872c112..4c8b3b82103d6318ea1d46250ad708bb3f722260 100644 (file)
@@ -699,6 +699,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
        { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
        { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
+       { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
        { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
        { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
index 4e4f46f3c89c025670d42860756f39b2bb62ae24..792e054126de51402711814f5962945f7742e188 100644 (file)
 #define XSENS_AWINDA_STATION_PID 0x0101
 #define XSENS_AWINDA_DONGLE_PID 0x0102
 #define XSENS_MTW_PID          0x0200  /* Xsens MTw */
+#define XSENS_MTDEVBOARD_PID   0x0300  /* Motion Tracker Development Board */
 #define XSENS_CONVERTER_PID    0xD00D  /* Xsens USB-serial converter */
 
 /* Xsens devices using FTDI VID */
index 5e19bb53b3a99a4ccc93696bf9792a4f4f1ad7c4..ea32b386797f5d52b70ee6f4028f5e8df43f3a8f 100644 (file)
@@ -1409,8 +1409,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                         * dependency now.
                         */
                        se_tpg = &tpg->se_tpg;
-                       ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
-                                                  &se_tpg->tpg_group.cg_item);
+                       ret = target_depend_item(&se_tpg->tpg_group.cg_item);
                        if (ret) {
                                pr_warn("configfs_depend_item() failed: %d\n", ret);
                                kfree(vs_tpg);
@@ -1513,8 +1512,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
                 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
                 */
                se_tpg = &tpg->se_tpg;
-               configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
-                                      &se_tpg->tpg_group.cg_item);
+               target_undepend_item(&se_tpg->tpg_group.cg_item);
        }
        if (match) {
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
index 3a145a643e0d5185146001275b47d3d0cc745454..6897f1c1bc732efe36632895fcedc577b7292a33 100644 (file)
@@ -274,6 +274,10 @@ static int pwm_backlight_probe(struct platform_device *pdev)
 
        pb->pwm = devm_pwm_get(&pdev->dev, NULL);
        if (IS_ERR(pb->pwm)) {
+               ret = PTR_ERR(pb->pwm);
+               if (ret == -EPROBE_DEFER)
+                       goto err_alloc;
+
                dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n");
                pb->legacy = true;
                pb->pwm = pwm_request(data->pwm_id, "pwm-backlight");
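
The point of the hunk is that -EPROBE_DEFER is not a failure: the PWM provider may simply not have probed yet, so the error must be propagated for the driver core to retry, rather than falling into the legacy pwm_request() path. A generic sketch of the pattern:

/* Generic deferral sketch; my_probe is illustrative, not pwm_bl code. */
static int my_probe(struct platform_device *pdev)
{
	struct pwm_device *pwm = devm_pwm_get(&pdev->dev, NULL);

	if (IS_ERR(pwm)) {
		int ret = PTR_ERR(pwm);

		if (ret == -EPROBE_DEFER)
			return ret;	/* core retries once PWM appears */
		dev_err(&pdev->dev, "PWM lookup failed: %d\n", ret);
		return ret;
	}
	return 0;
}
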
index e894eb278d8336d018d3e6e8c29556dc9b5f3cb5..eba1b7ac729454d30b1d611cd01d45b5ba23407e 100644 (file)
@@ -423,6 +423,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
                if (cpu == -1)
                        irq_set_affinity_hint(irq, NULL);
                else {
+                       cpumask_clear(mask);
                        cpumask_set_cpu(cpu, mask);
                        irq_set_affinity_hint(irq, mask);
                }
index 241ef68d28930a7faed26f18b67b296138e61d9e..cd46e415883090747d8238c2a2fbaa9b101dbc5e 100644 (file)
@@ -918,7 +918,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
                        total_size = total_mapping_size(elf_phdata,
                                                        loc->elf_ex.e_phnum);
                        if (!total_size) {
-                               error = -EINVAL;
+                               retval = -EINVAL;
                                goto out_free_dentry;
                        }
                }
index 430e0348c99ebb9b86c65ccd957a1b5e69ed6a2a..7dc886c9a78fc428b368a1c911b8c1ad745f48a5 100644 (file)
@@ -24,6 +24,7 @@
 #include "cifsfs.h"
 #include "dns_resolve.h"
 #include "cifs_debug.h"
+#include "cifs_unicode.h"
 
 static LIST_HEAD(cifs_dfs_automount_list);
 
@@ -312,7 +313,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
        xid = get_xid();
        rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls,
                &num_referrals, &referrals,
-               cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+               cifs_remap(cifs_sb));
        free_xid(xid);
 
        cifs_put_tlink(tlink);
index 0303c6793d903ab07cb7d1829bc372f0be3fc15f..5a53ac6b1e02515be90a4e446b103aa9f6f26874 100644 (file)
 #include "cifsglob.h"
 #include "cifs_debug.h"
 
-/*
- * cifs_utf16_bytes - how long will a string be after conversion?
- * @utf16 - pointer to input string
- * @maxbytes - don't go past this many bytes of input string
- * @codepage - destination codepage
- *
- * Walk a utf16le string and return the number of bytes that the string will
- * be after being converted to the given charset, not including any null
- * termination required. Don't walk past maxbytes in the source buffer.
- */
-int
-cifs_utf16_bytes(const __le16 *from, int maxbytes,
-               const struct nls_table *codepage)
-{
-       int i;
-       int charlen, outlen = 0;
-       int maxwords = maxbytes / 2;
-       char tmp[NLS_MAX_CHARSET_SIZE];
-       __u16 ftmp;
-
-       for (i = 0; i < maxwords; i++) {
-               ftmp = get_unaligned_le16(&from[i]);
-               if (ftmp == 0)
-                       break;
-
-               charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
-               if (charlen > 0)
-                       outlen += charlen;
-               else
-                       outlen++;
-       }
-
-       return outlen;
-}
-
 int cifs_remap(struct cifs_sb_info *cifs_sb)
 {
        int map_type;
@@ -155,10 +120,13 @@ convert_sfm_char(const __u16 src_char, char *target)
  * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
  */
 static int
-cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
+cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp,
             int maptype)
 {
        int len = 1;
+       __u16 src_char;
+
+       src_char = *from;
 
        if ((maptype == SFM_MAP_UNI_RSVD) && convert_sfm_char(src_char, target))
                return len;
@@ -168,10 +136,23 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
 
        /* if character not one of seven in special remap set */
        len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
-       if (len <= 0) {
-               *target = '?';
-               len = 1;
-       }
+       if (len <= 0)
+               goto surrogate_pair;
+
+       return len;
+
+surrogate_pair:
+       /* convert SURROGATE_PAIR and IVS */
+       if (strcmp(cp->charset, "utf8"))
+               goto unknown;
+       len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6);
+       if (len <= 0)
+               goto unknown;
+       return len;
+
+unknown:
+       *target = '?';
+       len = 1;
        return len;
 }
 
@@ -206,7 +187,7 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
        int nullsize = nls_nullsize(codepage);
        int fromwords = fromlen / 2;
        char tmp[NLS_MAX_CHARSET_SIZE];
-       __u16 ftmp;
+       __u16 ftmp[3];          /* 3 words x 2 bytes = 6 bytes of UTF-16 */
 
        /*
         * because the chars can be of varying widths, we need to take care
@@ -217,9 +198,17 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
        safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
 
        for (i = 0; i < fromwords; i++) {
-               ftmp = get_unaligned_le16(&from[i]);
-               if (ftmp == 0)
+               ftmp[0] = get_unaligned_le16(&from[i]);
+               if (ftmp[0] == 0)
                        break;
+               if (i + 1 < fromwords)
+                       ftmp[1] = get_unaligned_le16(&from[i + 1]);
+               else
+                       ftmp[1] = 0;
+               if (i + 2 < fromwords)
+                       ftmp[2] = get_unaligned_le16(&from[i + 2]);
+               else
+                       ftmp[2] = 0;
 
                /*
                 * check to see if converting this character might make the
@@ -234,6 +223,17 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
                /* put converted char into 'to' buffer */
                charlen = cifs_mapchar(&to[outlen], ftmp, codepage, map_type);
                outlen += charlen;
+
+               /* charlen = number of UTF-8 bytes for one character:
+                * a 4-byte UTF-8 sequence (surrogate pair) gives charlen=4
+                *   (4 bytes of UTF-16, consuming one extra word);
+                * a 7-8 byte UTF-8 IVS gives charlen=3+4 or 4+4
+                *   (two UTF-8 sequences mapping to two UTF-16 pairs) */
+               if (charlen == 4)
+                       i++;
+               else if (charlen >= 5)
+                       /* 5-6bytes UTF-8 */
+                       i += 2;
        }
 
        /* properly null-terminate string */
@@ -295,6 +295,46 @@ success:
        return i;
 }
 
+/*
+ * cifs_utf16_bytes - how long will a string be after conversion?
+ * @utf16 - pointer to input string
+ * @maxbytes - don't go past this many bytes of input string
+ * @codepage - destination codepage
+ *
+ * Walk a utf16le string and return the number of bytes that the string will
+ * be after being converted to the given charset, not including any null
+ * termination required. Don't walk past maxbytes in the source buffer.
+ */
+int
+cifs_utf16_bytes(const __le16 *from, int maxbytes,
+               const struct nls_table *codepage)
+{
+       int i;
+       int charlen, outlen = 0;
+       int maxwords = maxbytes / 2;
+       char tmp[NLS_MAX_CHARSET_SIZE];
+       __u16 ftmp[3];
+
+       for (i = 0; i < maxwords; i++) {
+               ftmp[0] = get_unaligned_le16(&from[i]);
+               if (ftmp[0] == 0)
+                       break;
+               if (i + 1 < maxwords)
+                       ftmp[1] = get_unaligned_le16(&from[i + 1]);
+               else
+                       ftmp[1] = 0;
+               if (i + 2 < maxwords)
+                       ftmp[2] = get_unaligned_le16(&from[i + 2]);
+               else
+                       ftmp[2] = 0;
+
+               charlen = cifs_mapchar(tmp, ftmp, codepage, NO_MAP_UNI_RSVD);
+               outlen += charlen;
+       }
+
+       return outlen;
+}
+
 /*
  * cifs_strndup_from_utf16 - copy a string from wire format to the local
  * codepage
@@ -409,10 +449,15 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
        char src_char;
        __le16 dst_char;
        wchar_t tmp;
+       wchar_t *wchar_to;      /* UTF-16 */
+       int ret;
+       unicode_t u;
 
        if (map_chars == NO_MAP_UNI_RSVD)
                return cifs_strtoUTF16(target, source, PATH_MAX, cp);
 
+       wchar_to = kzalloc(6, GFP_KERNEL);
+
        for (i = 0; i < srclen; j++) {
                src_char = source[i];
                charlen = 1;
@@ -441,11 +486,55 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
                         * if no match, use question mark, which at least in
                         * some cases serves as wild card
                         */
-                       if (charlen < 1) {
-                               dst_char = cpu_to_le16(0x003f);
-                               charlen = 1;
+                       if (charlen > 0)
+                               goto ctoUTF16;
+
+                       /* convert SURROGATE_PAIR */
+                       if (strcmp(cp->charset, "utf8") || !wchar_to)
+                               goto unknown;
+                       if (*(source + i) & 0x80) {
+                               charlen = utf8_to_utf32(source + i, 6, &u);
+                               if (charlen < 0)
+                                       goto unknown;
+                       } else
+                               goto unknown;
+                       ret  = utf8s_to_utf16s(source + i, charlen,
+                                              UTF16_LITTLE_ENDIAN,
+                                              wchar_to, 6);
+                       if (ret < 0)
+                               goto unknown;
+
+                       i += charlen;
+                       dst_char = cpu_to_le16(*wchar_to);
+                       if (charlen <= 3)
+                               /* 1-3bytes UTF-8 to 2bytes UTF-16 */
+                               put_unaligned(dst_char, &target[j]);
+                       else if (charlen == 4) {
+                               /* 4bytes UTF-8(surrogate pair) to 4bytes UTF-16
+                                * 7-8bytes UTF-8(IVS) divided to 2 UTF-16
+                                *   (charlen=3+4 or 4+4) */
+                               put_unaligned(dst_char, &target[j]);
+                               dst_char = cpu_to_le16(*(wchar_to + 1));
+                               j++;
+                               put_unaligned(dst_char, &target[j]);
+                       } else if (charlen >= 5) {
+                               /* 5-6bytes UTF-8 to 6bytes UTF-16 */
+                               put_unaligned(dst_char, &target[j]);
+                               dst_char = cpu_to_le16(*(wchar_to + 1));
+                               j++;
+                               put_unaligned(dst_char, &target[j]);
+                               dst_char = cpu_to_le16(*(wchar_to + 2));
+                               j++;
+                               put_unaligned(dst_char, &target[j]);
                        }
+                       continue;
+
+unknown:
+                       dst_char = cpu_to_le16(0x003f);
+                       charlen = 1;
                }
+
+ctoUTF16:
                /*
                 * character may take more than one byte in the source string,
                 * but will take exactly two bytes in the target string
@@ -456,6 +545,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
 
 ctoUTF16_out:
        put_unaligned(0, &target[j]); /* Null terminate target unicode string */
+       kfree(wchar_to);
        return j;
 }
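
The charlen bookkeeping above exists because code points beyond U+FFFF do not fit in one UTF-16 unit: a 4-byte UTF-8 sequence decodes to a supplementary-plane code point that must be emitted as a high/low surrogate pair. The arithmetic, as an illustrative helper (not part of the CIFS code):

/* Illustrative surrogate-pair encoder; not part of the CIFS code. */
static void encode_surrogate_pair(unsigned int u, __u16 out[2])
{
	u -= 0x10000;				/* u was > 0xFFFF */
	out[0] = 0xD800 | (u >> 10);		/* high surrogate */
	out[1] = 0xDC00 | (u & 0x3FF);		/* low surrogate */
}
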
 
index f5089bde363576dcab6a35887f3c539a8a7e6247..0a9fb6b53126a7c95715a862bfb3b067f443fc1a 100644 (file)
@@ -469,6 +469,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                seq_puts(s, ",nouser_xattr");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
                seq_puts(s, ",mapchars");
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
+               seq_puts(s, ",mapposix");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
                seq_puts(s, ",sfu");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
index c31ce98c1704a32b998f993d9a26613dc1342e29..c63fd1dde25b861b011f604522572c5619f177f1 100644 (file)
@@ -361,11 +361,11 @@ extern int CIFSUnixCreateHardLink(const unsigned int xid,
 extern int CIFSUnixCreateSymLink(const unsigned int xid,
                        struct cifs_tcon *tcon,
                        const char *fromName, const char *toName,
-                       const struct nls_table *nls_codepage);
+                       const struct nls_table *nls_codepage, int remap);
 extern int CIFSSMBUnixQuerySymLink(const unsigned int xid,
                        struct cifs_tcon *tcon,
                        const unsigned char *searchName, char **syminfo,
-                       const struct nls_table *nls_codepage);
+                       const struct nls_table *nls_codepage, int remap);
 extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
                               __u16 fid, char **symlinkinfo,
                               const struct nls_table *nls_codepage);
index 84650a51c7c4064357eab083868cc613a75f7f18..f26ffbfc64d8b4eca26b8e8101f705043fc7a4a0 100644 (file)
@@ -2784,7 +2784,7 @@ copyRetry:
 int
 CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon,
                      const char *fromName, const char *toName,
-                     const struct nls_table *nls_codepage)
+                     const struct nls_table *nls_codepage, int remap)
 {
        TRANSACTION2_SPI_REQ *pSMB = NULL;
        TRANSACTION2_SPI_RSP *pSMBr = NULL;
@@ -2804,9 +2804,9 @@ createSymLinkRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifs_strtoUTF16((__le16 *) pSMB->FileName, fromName,
-                                   /* find define for this maxpathcomponent */
-                                   PATH_MAX, nls_codepage);
+                   cifsConvertToUTF16((__le16 *) pSMB->FileName, fromName,
+                               /* find define for this maxpathcomponent */
+                                       PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
 
@@ -2828,9 +2828,9 @@ createSymLinkRetry:
        data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len_target =
-                   cifs_strtoUTF16((__le16 *) data_offset, toName, PATH_MAX
-                                   /* find define for this maxpathcomponent */
-                                   , nls_codepage);
+                   cifsConvertToUTF16((__le16 *) data_offset, toName,
+                               /* find define for this maxpathcomponent */
+                                       PATH_MAX, nls_codepage, remap);
                name_len_target++;      /* trailing null */
                name_len_target *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -3034,7 +3034,7 @@ winCreateHardLinkRetry:
 int
 CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
                        const unsigned char *searchName, char **symlinkinfo,
-                       const struct nls_table *nls_codepage)
+                       const struct nls_table *nls_codepage, int remap)
 {
 /* SMB_QUERY_FILE_UNIX_LINK */
        TRANSACTION2_QPI_REQ *pSMB = NULL;
@@ -3055,8 +3055,9 @@ querySymLinkRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                       cifs_strtoUTF16((__le16 *) pSMB->FileName, searchName,
-                                       PATH_MAX, nls_codepage);
+                       cifsConvertToUTF16((__le16 *) pSMB->FileName,
+                                          searchName, PATH_MAX, nls_codepage,
+                                          remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -4917,7 +4918,7 @@ getDFSRetry:
                strncpy(pSMB->RequestFileName, search_name, name_len);
        }
 
-       if (ses->server && ses->server->sign)
+       if (ses->server->sign)
                pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 
        pSMB->hdr.Uid = ses->Suid;
index f3bfe08e177b6c86a4f1a99a8905f1b417f82af5..8383d5ea42028dac6788e642b6c3ed0f61459d51 100644 (file)
@@ -386,6 +386,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
                rc = generic_ip_connect(server);
                if (rc) {
                        cifs_dbg(FYI, "reconnect error %d\n", rc);
+                       mutex_unlock(&server->srv_mutex);
                        msleep(3000);
                } else {
                        atomic_inc(&tcpSesReconnectCount);
@@ -393,8 +394,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
                        if (server->tcpStatus != CifsExiting)
                                server->tcpStatus = CifsNeedNegotiate;
                        spin_unlock(&GlobalMid_Lock);
+                       mutex_unlock(&server->srv_mutex);
                }
-               mutex_unlock(&server->srv_mutex);
        } while (server->tcpStatus == CifsNeedReconnect);
 
        return rc;
index 338d56936f6af694b7085284a38e7b751ba7eb66..c3eb998a99bd18a2ed9b7b843c99be15fedab9df 100644 (file)
@@ -620,8 +620,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
                }
                rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
                                            cifs_sb->local_nls,
-                                           cifs_sb->mnt_cifs_flags &
-                                               CIFS_MOUNT_MAP_SPECIAL_CHR);
+                                           cifs_remap(cifs_sb));
                if (rc)
                        goto mknod_out;
 
index cafbf10521d5017074196e02ad37218939d0ab70..3f50cee79df9d3318209e19281acef536b34af37 100644 (file)
@@ -140,8 +140,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
-                            cifs_sb->mnt_cifs_flags &
-                                       CIFS_MOUNT_MAP_SPECIAL_CHR);
+                            cifs_remap(cifs_sb));
        cifs_put_tlink(tlink);
 
        if (rc)
@@ -1553,8 +1552,8 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
                rc = server->ops->mand_unlock_range(cfile, flock, xid);
 
 out:
-       if (flock->fl_flags & FL_POSIX)
-               posix_lock_file_wait(file, flock);
+       if (flock->fl_flags & FL_POSIX && !rc)
+               rc = posix_lock_file_wait(file, flock);
        return rc;
 }
 
index 55b58112d122248b92305ea00eb66c6715a40b03..f621b44cb8009fe87bf631e0a96c941fe63d3408 100644 (file)
@@ -373,8 +373,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
 
        /* could have done a find first instead but this returns more info */
        rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
-                                 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
-                                       CIFS_MOUNT_MAP_SPECIAL_CHR);
+                                 cifs_sb->local_nls, cifs_remap(cifs_sb));
        cifs_put_tlink(tlink);
 
        if (!rc) {
@@ -402,9 +401,25 @@ int cifs_get_inode_info_unix(struct inode **pinode,
                        rc = -ENOMEM;
        } else {
                /* we already have inode, update it */
+
+               /* if uniqueid is different, return error */
+               if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
+                   CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
+                       rc = -ESTALE;
+                       goto cgiiu_exit;
+               }
+
+               /* if filetype is different, return error */
+               if (unlikely(((*pinode)->i_mode & S_IFMT) !=
+                   (fattr.cf_mode & S_IFMT))) {
+                       rc = -ESTALE;
+                       goto cgiiu_exit;
+               }
+
                cifs_fattr_to_inode(*pinode, &fattr);
        }
 
+cgiiu_exit:
        return rc;
 }
 
@@ -839,6 +854,15 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
                if (!*inode)
                        rc = -ENOMEM;
        } else {
+               /* we already have inode, update it */
+
+               /* if filetype is different, return error */
+               if (unlikely(((*inode)->i_mode & S_IFMT) !=
+                   (fattr.cf_mode & S_IFMT))) {
+                       rc = -ESTALE;
+                       goto cgii_exit;
+               }
+
                cifs_fattr_to_inode(*inode, &fattr);
        }
 
@@ -2215,8 +2239,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
                pTcon = tlink_tcon(tlink);
                rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
                                    cifs_sb->local_nls,
-                                   cifs_sb->mnt_cifs_flags &
-                                       CIFS_MOUNT_MAP_SPECIAL_CHR);
+                                   cifs_remap(cifs_sb));
                cifs_put_tlink(tlink);
        }
 
index 252e672d56043468fb8f906ce371acef27d74db0..e6c707cc62b39b445b4b374eeec51a5be4fe07f4 100644 (file)
@@ -717,7 +717,8 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
                rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname);
        else if (pTcon->unix_ext)
                rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
-                                          cifs_sb->local_nls);
+                                          cifs_sb->local_nls,
+                                          cifs_remap(cifs_sb));
        /* else
           rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,
                                        cifs_sb_target->local_nls); */
index b4a47237486b883851e889e78505bd1179c7842d..b1eede3678a91d8d1ea3e350cb035cabf1da7ba7 100644 (file)
@@ -90,6 +90,8 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
        if (dentry) {
                inode = d_inode(dentry);
                if (inode) {
+                       if (d_mountpoint(dentry))
+                               goto out;
                        /*
                         * If we're generating inode numbers, then we don't
                         * want to clobber the existing one with the one that
index 7bfdd6066276256fc03855cd809f63c167d3991b..fc537c29044edd8a158bb130a65e371370826164 100644 (file)
@@ -960,7 +960,8 @@ cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
        /* Check for unix extensions */
        if (cap_unix(tcon->ses)) {
                rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path,
-                                            cifs_sb->local_nls);
+                                            cifs_sb->local_nls,
+                                            cifs_remap(cifs_sb));
                if (rc == -EREMOTE)
                        rc = cifs_unix_dfs_readlink(xid, tcon, full_path,
                                                    target_path,
index 65cd7a84c8bc3206033a917fe9d98fc939cbe1af..54cbe19d9c0871a1bb47a17edfc1d414cb383b9f 100644 (file)
@@ -110,7 +110,7 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
 
        /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
        /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
-       if ((tcon->ses) &&
+       if ((tcon->ses) && (tcon->ses->server) &&
            (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
                hdr->CreditCharge = cpu_to_le16(1);
        /* else CreditCharge MBZ */
index 656ce522a218f29850e2415b0a038c8208a5dd52..37b5afdaf6989e211151cc55a7fa656a6addd364 100644 (file)
@@ -1239,13 +1239,13 @@ ascend:
                /* might go back up the wrong parent if we have had a rename. */
                if (need_seqretry(&rename_lock, seq))
                        goto rename_retry;
-               next = child->d_child.next;
-               while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
+               /* go into the first sibling still alive */
+               do {
+                       next = child->d_child.next;
                        if (next == &this_parent->d_subdirs)
                                goto ascend;
                        child = list_entry(next, struct dentry, d_child);
-                       next = next->next;
-               }
+               } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
                rcu_read_unlock();
                goto resume;
        }
index 999ff5c3cab0edacd585447132180d5c35554e3c..d59712dfa3e701e86ff53609308e813cf8acf69e 100644 (file)
@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
                goto out_err;
        }
        /* copy the full handle */
-       if (copy_from_user(handle, ufh,
-                          sizeof(struct file_handle) +
+       *handle = f_handle;
+       if (copy_from_user(&handle->f_handle,
+                          &ufh->f_handle,
                           f_handle.handle_bytes)) {
                retval = -EFAULT;
                goto out_handle;
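
The rework above copies the user-supplied header only once: handle_bytes comes from the already-fetched f_handle rather than a second read of user memory, closing the window where userspace could change the length between the two fetches. A minimal sketch of the single-fetch pattern; the struct and limit names here are hypothetical:

	/* sketch: fetch the user header once, validate it, then copy the payload */
	struct user_blob { u32 payload_bytes; u8 payload[]; };	/* hypothetical */

	static int fetch_blob(struct user_blob *dst,
			      const struct user_blob __user *src, u32 max_bytes)
	{
		struct user_blob hdr;

		if (copy_from_user(&hdr, src, sizeof(hdr)))	/* single header fetch */
			return -EFAULT;
		if (hdr.payload_bytes > max_bytes)		/* validate the local copy */
			return -EINVAL;
		*dst = hdr;					/* commit validated header */
		/* re-read only the payload, never the user-controlled header */
		if (copy_from_user(dst->payload, src->payload, hdr.payload_bytes))
			return -EFAULT;
		return 0;
	}
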
index 082234581d05b2b2190601f3b8a5f545f7380140..83f4e76511c2bf7804c922f268ba4319a5cfb799 100644 (file)
@@ -159,7 +159,7 @@ int omfs_allocate_range(struct super_block *sb,
        goto out;
 
 found:
-       *return_block = i * bits_per_entry + bit;
+       *return_block = (u64) i * bits_per_entry + bit;
        *return_size = run;
        ret = set_run(sb, i, bits_per_entry, bit, run, 1);
 
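
The added (u64) cast matters because all three operands are 32-bit: without it the multiply is performed in 32-bit arithmetic and can wrap before the result is widened into the 64-bit *return_block. A standalone illustration with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t i = 70000, bits_per_entry = 65536, bit = 3;
		uint64_t wrapped = i * bits_per_entry + bit;	/* 32-bit multiply wraps on the usual ILP32/LP64 ABIs */
		uint64_t widened = (uint64_t)i * bits_per_entry + bit;	/* widened before multiplying */

		/* prints wrapped=292552707 widened=4587520003 */
		printf("wrapped=%llu widened=%llu\n",
		       (unsigned long long)wrapped, (unsigned long long)widened);
		return 0;
	}
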
index 138321b0c6c2b95a8efcef1a5b3183f3126acbb9..3d935c81789aaab13e33e52fab88b408fbccd6f2 100644 (file)
@@ -306,7 +306,8 @@ static const struct super_operations omfs_sops = {
  */
 static int omfs_get_imap(struct super_block *sb)
 {
-       unsigned int bitmap_size, count, array_size;
+       unsigned int bitmap_size, array_size;
+       int count;
        struct omfs_sb_info *sbi = OMFS_SB(sb);
        struct buffer_head *bh;
        unsigned long **ptr;
@@ -359,7 +360,7 @@ nomem:
 }
 
 enum {
-       Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask
+       Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err
 };
 
 static const match_table_t tokens = {
@@ -368,6 +369,7 @@ static const match_table_t tokens = {
        {Opt_umask, "umask=%o"},
        {Opt_dmask, "dmask=%o"},
        {Opt_fmask, "fmask=%o"},
+       {Opt_err, NULL},
 };
 
 static int parse_options(char *options, struct omfs_sb_info *sbi)
@@ -548,8 +550,10 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent)
        }
 
        sb->s_root = d_make_root(root);
-       if (!sb->s_root)
+       if (!sb->s_root) {
+               ret = -ENOMEM;
                goto out_brelse_bh2;
+       }
        printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name);
 
        ret = 0;
index 24f640441bd90977a079aac782768025c68f3712..84d693d374284b580208fec3b8eb3c57bdd4195c 100644 (file)
@@ -299,6 +299,9 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
        struct cred *override_cred;
        char *link = NULL;
 
+       if (WARN_ON(!workdir))
+               return -EROFS;
+
        ovl_path_upper(parent, &parentpath);
        upperdir = parentpath.dentry;
 
index d139405d2bfad7cfd94c735913ecebf221def5b5..692ceda3bc21f6976b65f3e2d5aa4b7ef2e9c5e8 100644 (file)
@@ -222,6 +222,9 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
        struct kstat stat;
        int err;
 
+       if (WARN_ON(!workdir))
+               return ERR_PTR(-EROFS);
+
        err = ovl_lock_rename_workdir(workdir, upperdir);
        if (err)
                goto out;
@@ -322,6 +325,9 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
        struct dentry *newdentry;
        int err;
 
+       if (WARN_ON(!workdir))
+               return -EROFS;
+
        err = ovl_lock_rename_workdir(workdir, upperdir);
        if (err)
                goto out;
@@ -506,11 +512,28 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
        struct dentry *opaquedir = NULL;
        int err;
 
-       if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
-               opaquedir = ovl_check_empty_and_clear(dentry);
-               err = PTR_ERR(opaquedir);
-               if (IS_ERR(opaquedir))
-                       goto out;
+       if (WARN_ON(!workdir))
+               return -EROFS;
+
+       if (is_dir) {
+               if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
+                       opaquedir = ovl_check_empty_and_clear(dentry);
+                       err = PTR_ERR(opaquedir);
+                       if (IS_ERR(opaquedir))
+                               goto out;
+               } else {
+                       LIST_HEAD(list);
+
+                       /*
+                        * When removing an empty opaque directory, it makes
+                        * no sense to replace it with an exact replica of
+                        * itself.  But emptiness still needs to be checked.
+                        */
+                       err = ovl_check_empty_dir(dentry, &list);
+                       ovl_cache_free(&list);
+                       if (err)
+                               goto out;
+               }
        }
 
        err = ovl_lock_rename_workdir(workdir, upperdir);
index 5f0d1993e6e3952bda9352d231e8fce7dee838e8..bf8537c7f455207830046a50d67d394f86d37f4a 100644 (file)
@@ -529,7 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data)
 {
        struct ovl_fs *ufs = sb->s_fs_info;
 
-       if (!(*flags & MS_RDONLY) && !ufs->upper_mnt)
+       if (!(*flags & MS_RDONLY) && (!ufs->upper_mnt || !ufs->workdir))
                return -EROFS;
 
        return 0;
@@ -925,9 +925,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
                err = PTR_ERR(ufs->workdir);
                if (IS_ERR(ufs->workdir)) {
-                       pr_err("overlayfs: failed to create directory %s/%s\n",
-                              ufs->config.workdir, OVL_WORKDIR_NAME);
-                       goto out_put_upper_mnt;
+                       pr_warn("overlayfs: failed to create directory %s/%s (errno: %i); mounting read-only\n",
+                               ufs->config.workdir, OVL_WORKDIR_NAME, -err);
+                       sb->s_flags |= MS_RDONLY;
+                       ufs->workdir = NULL;
                }
        }
 
@@ -997,7 +998,6 @@ out_put_lower_mnt:
        kfree(ufs->lower_mnt);
 out_put_workdir:
        dput(ufs->workdir);
-out_put_upper_mnt:
        mntput(ufs->upper_mnt);
 out_put_lowerpath:
        for (i = 0; i < numlower; i++)
index 04e79d57bca600b4a21cd0a2a936639d9556e6c8..e9d401ce93bb19d822a2ec9b475dae7ad5d279c1 100644 (file)
@@ -574,8 +574,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
  * After the last attribute is removed revert to original inode format,
  * making all literal area available to the data fork once more.
  */
-STATIC void
-xfs_attr_fork_reset(
+void
+xfs_attr_fork_remove(
        struct xfs_inode        *ip,
        struct xfs_trans        *tp)
 {
@@ -641,7 +641,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
            (mp->m_flags & XFS_MOUNT_ATTR2) &&
            (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
            !(args->op_flags & XFS_DA_OP_ADDNAME)) {
-               xfs_attr_fork_reset(dp, args->trans);
+               xfs_attr_fork_remove(dp, args->trans);
        } else {
                xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
                dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
@@ -905,7 +905,7 @@ xfs_attr3_leaf_to_shortform(
        if (forkoff == -1) {
                ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
                ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
-               xfs_attr_fork_reset(dp, args->trans);
+               xfs_attr_fork_remove(dp, args->trans);
                goto out;
        }
 
index 025c4b820c03a1c642c18d9c53d46e08a0ab2d94..882c8d3388913b3d44aa9105184920feb89709a6 100644 (file)
@@ -53,7 +53,7 @@ int   xfs_attr_shortform_remove(struct xfs_da_args *args);
 int    xfs_attr_shortform_list(struct xfs_attr_list_context *context);
 int    xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
 int    xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
-
+void   xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp);
 
 /*
  * Internal routines when attribute fork size == XFS_LBSIZE(mp).
index aeffeaaac0ec406e543730eb608de1eb3ebc40fb..f1026e86dabc9a00ead716785a3acb5c19ee8e10 100644 (file)
@@ -3224,12 +3224,24 @@ xfs_bmap_extsize_align(
                align_alen += temp;
                align_off -= temp;
        }
+
+       /* Same adjustment for the end of the requested area. */
+       temp = (align_alen % extsz);
+       if (temp)
+               align_alen += extsz - temp;
+
        /*
-        * Same adjustment for the end of the requested area.
+        * For large extent hint sizes, the aligned extent might be larger than
+        * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
+        * the length back under MAXEXTLEN. The outer allocation loops handle
+        * short allocation just fine, so it is safe to do this. We only want to
+        * do it when we are forced to, though, because it means more allocation
+        * operations are required.
         */
-       if ((temp = (align_alen % extsz))) {
-               align_alen += extsz - temp;
-       }
+       while (align_alen > MAXEXTLEN)
+               align_alen -= extsz;
+       ASSERT(align_alen <= MAXEXTLEN);
+
        /*
         * If the previous block overlaps with this proposed allocation
         * then move the start forward without adjusting the length.
@@ -3318,7 +3330,9 @@ xfs_bmap_extsize_align(
                        return -EINVAL;
        } else {
                ASSERT(orig_off >= align_off);
-               ASSERT(orig_end <= align_off + align_alen);
+               /* see MAXEXTLEN handling above */
+               ASSERT(orig_end <= align_off + align_alen ||
+                      align_alen + extsz > MAXEXTLEN);
        }
 
 #ifdef DEBUG
@@ -4099,13 +4113,6 @@ xfs_bmapi_reserve_delalloc(
        /* Figure out the extent size, adjust alen */
        extsz = xfs_get_extsz_hint(ip);
        if (extsz) {
-               /*
-                * Make sure we don't exceed a single extent length when we
-                * align the extent by reducing length we are going to
-                * allocate by the maximum amount extent size aligment may
-                * require.
-                */
-               alen = XFS_FILBLKS_MIN(len, MAXEXTLEN - (2 * extsz - 1));
                error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
                                               1, 0, &aoff, &alen);
                ASSERT(!error);
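
For a concrete feel of the new clamp: with XFS's MAXEXTLEN of 2^21 - 1 blocks and a 2^20-block extent size hint, a request whose end-alignment rounds the length up to 2 * 2^20 = 2097152 overshoots the limit by one block, and a single `align_alen -= extsz` step brings it back to 1048576. A self-contained sketch of that rounding and clamp, with illustrative values only:

	#include <stdio.h>

	#define MAXEXTLEN	((1u << 21) - 1)	/* 2097151 blocks, as in XFS */

	int main(void)
	{
		unsigned int extsz = 1u << 20;		/* large extent size hint */
		unsigned int align_alen = 1500000;	/* length after start alignment */
		unsigned int temp = align_alen % extsz;

		if (temp)
			align_alen += extsz - temp;	/* rounds up to 2097152 */
		while (align_alen > MAXEXTLEN)		/* pull back under the cap */
			align_alen -= extsz;

		printf("aligned length = %u\n", align_alen);	/* prints 1048576 */
		return 0;
	}
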
index 07349a183a110fdf57bdf9a7f1d704452de5cdb7..1c9e75521250ecf606639578ce79696b6ff4a682 100644 (file)
@@ -376,7 +376,7 @@ xfs_ialloc_ag_alloc(
         */
        newlen = args.mp->m_ialloc_inos;
        if (args.mp->m_maxicount &&
-           percpu_counter_read(&args.mp->m_icount) + newlen >
+           percpu_counter_read_positive(&args.mp->m_icount) + newlen >
                                                        args.mp->m_maxicount)
                return -ENOSPC;
        args.minlen = args.maxlen = args.mp->m_ialloc_blks;
@@ -1339,10 +1339,13 @@ xfs_dialloc(
         * If we have already hit the ceiling of inode blocks then clear
         * okalloc so we scan all available agi structures for a free
         * inode.
+        *
+        * Read a rough value of mp->m_icount via percpu_counter_read_positive(),
+        * which trades precision for performance.
         */
        if (mp->m_maxicount &&
-           percpu_counter_read(&mp->m_icount) + mp->m_ialloc_inos >
-                                                       mp->m_maxicount) {
+           percpu_counter_read_positive(&mp->m_icount) + mp->m_ialloc_inos
+                                                       > mp->m_maxicount) {
                noroom = 1;
                okalloc = 0;
        }
index f9c1c64782d39ec36fabf800653772f8c5b24280..3fbf167cfb4cddfcb42a57ca7d613096d5c97fe0 100644 (file)
@@ -380,23 +380,31 @@ xfs_attr3_root_inactive(
        return error;
 }
 
+/*
+ * xfs_attr_inactive kills all traces of an attribute fork on an inode. It
+ * removes both the on-disk and in-memory inode fork. Note that this also has to
+ * handle the condition of inodes without attributes but with an attribute fork
+ * configured, so we can't use xfs_inode_hasattr() here.
+ *
+ * The in-memory attribute fork is removed even on error.
+ */
 int
-xfs_attr_inactive(xfs_inode_t *dp)
+xfs_attr_inactive(
+       struct xfs_inode        *dp)
 {
-       xfs_trans_t *trans;
-       xfs_mount_t *mp;
-       int error;
+       struct xfs_trans        *trans;
+       struct xfs_mount        *mp;
+       int                     cancel_flags = 0;
+       int                     lock_mode = XFS_ILOCK_SHARED;
+       int                     error = 0;
 
        mp = dp->i_mount;
        ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
 
-       xfs_ilock(dp, XFS_ILOCK_SHARED);
-       if (!xfs_inode_hasattr(dp) ||
-           dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-               xfs_iunlock(dp, XFS_ILOCK_SHARED);
-               return 0;
-       }
-       xfs_iunlock(dp, XFS_ILOCK_SHARED);
+       xfs_ilock(dp, lock_mode);
+       if (!XFS_IFORK_Q(dp))
+               goto out_destroy_fork;
+       xfs_iunlock(dp, lock_mode);
 
        /*
         * Start our first transaction of the day.
@@ -408,13 +416,18 @@ xfs_attr_inactive(xfs_inode_t *dp)
         * the inode in every transaction to let it float upward through
         * the log.
         */
+       lock_mode = 0;
        trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
        error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0);
-       if (error) {
-               xfs_trans_cancel(trans, 0);
-               return error;
-       }
-       xfs_ilock(dp, XFS_ILOCK_EXCL);
+       if (error)
+               goto out_cancel;
+
+       lock_mode = XFS_ILOCK_EXCL;
+       cancel_flags = XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT;
+       xfs_ilock(dp, lock_mode);
+
+       if (!XFS_IFORK_Q(dp))
+               goto out_cancel;
 
        /*
         * No need to make quota reservations here. We expect to release some
@@ -422,29 +435,31 @@ xfs_attr_inactive(xfs_inode_t *dp)
         */
        xfs_trans_ijoin(trans, dp, 0);
 
-       /*
-        * Decide on what work routines to call based on the inode size.
-        */
-       if (!xfs_inode_hasattr(dp) ||
-           dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-               error = 0;
-               goto out;
+       /* invalidate and truncate the attribute fork extents */
+       if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
+               error = xfs_attr3_root_inactive(&trans, dp);
+               if (error)
+                       goto out_cancel;
+
+               error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
+               if (error)
+                       goto out_cancel;
        }
-       error = xfs_attr3_root_inactive(&trans, dp);
-       if (error)
-               goto out;
 
-       error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
-       if (error)
-               goto out;
+       /* Reset the attribute fork - this also destroys the in-core fork */
+       xfs_attr_fork_remove(dp, trans);
 
        error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
-       xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
+       xfs_iunlock(dp, lock_mode);
        return error;
 
-out:
-       xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
-       xfs_iunlock(dp, XFS_ILOCK_EXCL);
+out_cancel:
+       xfs_trans_cancel(trans, cancel_flags);
+out_destroy_fork:
+       /* kill the in-core attr fork before we drop the inode lock */
+       if (dp->i_afp)
+               xfs_idestroy_fork(dp, XFS_ATTR_FORK);
+       if (lock_mode)
+               xfs_iunlock(dp, lock_mode);
        return error;
 }
index 8121e75352ee9bddd4726ca685d6d3e855256bdd..3b7591224f4a6698d32371a927e70cb2a391f4a9 100644 (file)
@@ -124,7 +124,7 @@ xfs_iozero(
                status = 0;
        } while (count);
 
-       return (-status);
+       return status;
 }
 
 int
index d6ebc85192b7b3f4fd21e3cbc25ccb5f54501319..539a85fddbc26864004e80f5fb229c6c2de565b8 100644 (file)
@@ -1946,21 +1946,17 @@ xfs_inactive(
        /*
         * If there are attributes associated with the file then blow them away
         * now.  The code calls a routine that recursively deconstructs the
-        * attribute fork.  We need to just commit the current transaction
-        * because we can't use it for xfs_attr_inactive().
+        * attribute fork. It also blows away the in-core attribute fork.
         */
-       if (ip->i_d.di_anextents > 0) {
-               ASSERT(ip->i_d.di_forkoff != 0);
-
+       if (XFS_IFORK_Q(ip)) {
                error = xfs_attr_inactive(ip);
                if (error)
                        return;
        }
 
-       if (ip->i_afp)
-               xfs_idestroy_fork(ip, XFS_ATTR_FORK);
-
+       ASSERT(!ip->i_afp);
        ASSERT(ip->i_d.di_anextents == 0);
+       ASSERT(ip->i_d.di_forkoff == 0);
 
        /*
         * Free the inode.
@@ -2883,7 +2879,13 @@ xfs_rename_alloc_whiteout(
        if (error)
                return error;
 
-       /* Satisfy xfs_bumplink that this is a real tmpfile */
+       /*
+        * Prepare the tmpfile inode as if it were created through the VFS.
+        * Otherwise, the link increment paths will complain about nlink 0->1.
+        * Drop the link count as done by d_tmpfile(), complete the inode setup
+        * and flag it as linkable.
+        */
+       drop_nlink(VFS_I(tmpfile));
        xfs_finish_inode_setup(tmpfile);
        VFS_I(tmpfile)->i_state |= I_LINKABLE;
 
@@ -3151,7 +3153,7 @@ xfs_rename(
         * intermediate state on disk.
         */
        if (wip) {
-               ASSERT(wip->i_d.di_nlink == 0);
+               ASSERT(VFS_I(wip)->i_nlink == 0 && wip->i_d.di_nlink == 0);
                error = xfs_bumplink(tp, wip);
                if (error)
                        goto out_trans_abort;
index 2ce7ee3b4ec1fdb9e9344a1ec7ea3a2df5a3b29c..6f23fbdfb365adca1571eadece38b77a619c50ad 100644 (file)
@@ -1084,14 +1084,18 @@ xfs_log_sbcount(xfs_mount_t *mp)
        return xfs_sync_sb(mp, true);
 }
 
+/*
+ * Deltas for the inode count are +/-64, hence we use a large batch size
+ * of 128 so we don't need to take the counter lock on every update.
+ */
+#define XFS_ICOUNT_BATCH       128
 int
 xfs_mod_icount(
        struct xfs_mount        *mp,
        int64_t                 delta)
 {
-       /* deltas are +/-64, hence the large batch size of 128. */
-       __percpu_counter_add(&mp->m_icount, delta, 128);
-       if (percpu_counter_compare(&mp->m_icount, 0) < 0) {
+       __percpu_counter_add(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
+       if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
                ASSERT(0);
                percpu_counter_add(&mp->m_icount, -delta);
                return -EINVAL;
@@ -1113,6 +1117,14 @@ xfs_mod_ifree(
        return 0;
 }
 
+/*
+ * Deltas for the block count can vary from 1 to very large, but lock contention
+ * only occurs on frequent small block count updates such as in the delayed
+ * allocation path for buffered writes (a page at a time). Hence we set
+ * a large batch count (1024) to minimise global counter updates except when
+ * we get near to ENOSPC and we have to be very accurate with our updates.
+ */
+#define XFS_FDBLOCKS_BATCH     1024
 int
 xfs_mod_fdblocks(
        struct xfs_mount        *mp,
@@ -1151,25 +1163,19 @@ xfs_mod_fdblocks(
         * Taking blocks away, need to be more accurate the closer we
         * are to zero.
         *
-        * batch size is set to a maximum of 1024 blocks - if we are
-        * allocating of freeing extents larger than this then we aren't
-        * going to be hammering the counter lock so a lock per update
-        * is not a problem.
-        *
         * If the counter has a value of less than 2 * max batch size,
         * then make everything serialise as we are real close to
         * ENOSPC.
         */
-#define __BATCH        1024
-       if (percpu_counter_compare(&mp->m_fdblocks, 2 * __BATCH) < 0)
+       if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
+                                    XFS_FDBLOCKS_BATCH) < 0)
                batch = 1;
        else
-               batch = __BATCH;
-#undef __BATCH
+               batch = XFS_FDBLOCKS_BATCH;
 
        __percpu_counter_add(&mp->m_fdblocks, delta, batch);
-       if (percpu_counter_compare(&mp->m_fdblocks,
-                                  XFS_ALLOC_SET_ASIDE(mp)) >= 0) {
+       if (__percpu_counter_compare(&mp->m_fdblocks, XFS_ALLOC_SET_ASIDE(mp),
+                                    XFS_FDBLOCKS_BATCH) >= 0) {
                /* we had space! */
                return 0;
        }
index aff923ae8c4b963272563759b9ac52ad55778bd0..d87d8eced06407c59c6d231f9e707bdcc398ce52 100644 (file)
@@ -116,7 +116,6 @@ __printf(3, 4)
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
-void bdi_unregister(struct backing_dev_info *bdi);
 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
                        enum wb_reason reason);
index ae2982c0f7a60ed93339e767feaf1fc89aa02134..656da2a12ffee319f67cb744945f028599e0603c 100644 (file)
@@ -17,7 +17,7 @@
 #define PHY_ID_BCM7250                 0xae025280
 #define PHY_ID_BCM7364                 0xae025260
 #define PHY_ID_BCM7366                 0x600d8490
-#define PHY_ID_BCM7425                 0x03625e60
+#define PHY_ID_BCM7425                 0x600d86b0
 #define PHY_ID_BCM7429                 0x600d8730
 #define PHY_ID_BCM7439                 0x600d8480
 #define PHY_ID_BCM7439_2               0xae025080
index 27e285b92b5f748b8ffe9a8e599c8850f0346007..59915ea5373ca798dca185070e11af88cc7745d9 100644 (file)
@@ -151,10 +151,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
        return 1;
 }
 
-static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-       set_bit(0, cpumask_bits(dstp));
-
        return 0;
 }
 
@@ -208,7 +206,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 
 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
+unsigned int cpumask_local_spread(unsigned int i, int node);
 
 /**
  * for_each_cpu - iterate over every cpu in a mask
index 796ef9645827f000cb76ce4cd8637dc6cfa4db7a..9e14edcf7f6e31245f0ef13950e1166e0f9e51d2 100644 (file)
@@ -115,13 +115,14 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
  * Extended Capability Register
  */
 
+#define ecap_pasid(e)          ((e >> 40) & 0x1)
 #define ecap_pss(e)            ((e >> 35) & 0x1f)
 #define ecap_eafs(e)           ((e >> 34) & 0x1)
 #define ecap_nwfs(e)           ((e >> 33) & 0x1)
 #define ecap_srs(e)            ((e >> 31) & 0x1)
 #define ecap_ers(e)            ((e >> 30) & 0x1)
 #define ecap_prs(e)            ((e >> 29) & 0x1)
-#define ecap_pasid(e)          ((e >> 28) & 0x1)
+/* PASID support used to be on bit 28 */
 #define ecap_dis(e)            ((e >> 27) & 0x1)
 #define ecap_nest(e)           ((e >> 26) & 0x1)
 #define ecap_mts(e)            ((e >> 25) & 0x1)
@@ -295,6 +296,7 @@ struct q_inval {
 /* 1MB - maximum possible interrupt remapping table size */
 #define INTR_REMAP_PAGE_ORDER  8
 #define INTR_REMAP_TABLE_REG_SIZE      0xf
+#define INTR_REMAP_TABLE_REG_SIZE_MASK  0xf
 
 #define INTR_REMAP_TABLE_ENTRIES       65536
 
@@ -319,6 +321,9 @@ enum {
        MAX_SR_DMAR_REGS
 };
 
+#define VTD_FLAG_TRANS_PRE_ENABLED     (1 << 0)
+#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
+
 struct intel_iommu {
        void __iomem    *reg; /* Pointer to hardware regs, virtual addr */
        u64             reg_phys; /* physical address of hw register set */
@@ -350,6 +355,7 @@ struct intel_iommu {
 #endif
        struct device   *iommu_dev; /* IOMMU-sysfs device */
        int             node;
+       u32             flags;      /* Software defined flags */
 };
 
 static inline void __iommu_flush_cache(
index 0546b8710ce308540abd841944aee07ba24fedfb..dc767f7c3704639da944153e8bdae7cf40cd804d 100644 (file)
@@ -114,6 +114,20 @@ enum iommu_attr {
        DOMAIN_ATTR_MAX,
 };
 
+/**
+ * struct iommu_dm_region - descriptor for a direct mapped memory region
+ * @list: Linked list pointers
+ * @start: System physical start address of the region
+ * @length: Length of the region in bytes
+ * @prot: IOMMU Protection flags (READ/WRITE/...)
+ */
+struct iommu_dm_region {
+       struct list_head        list;
+       phys_addr_t             start;
+       size_t                  length;
+       int                     prot;
+};
+
 #ifdef CONFIG_IOMMU_API
 
 /**
@@ -159,6 +173,10 @@ struct iommu_ops {
        int (*domain_set_attr)(struct iommu_domain *domain,
                               enum iommu_attr attr, void *data);
 
+       /* Request/Free a list of direct mapping requirements for a device */
+       void (*get_dm_regions)(struct device *dev, struct list_head *list);
+       void (*put_dm_regions)(struct device *dev, struct list_head *list);
+
        /* Window handling functions */
        int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
                                    phys_addr_t paddr, u64 size, int prot);
@@ -193,6 +211,7 @@ extern int iommu_attach_device(struct iommu_domain *domain,
                               struct device *dev);
 extern void iommu_detach_device(struct iommu_domain *domain,
                                struct device *dev);
+extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
                     phys_addr_t paddr, size_t size, int prot);
 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -204,6 +223,10 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
                        iommu_fault_handler_t handler, void *token);
 
+extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
+extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
+extern int iommu_request_dm_for_dev(struct device *dev);
+
 extern int iommu_attach_group(struct iommu_domain *domain,
                              struct iommu_group *group);
 extern void iommu_detach_group(struct iommu_domain *domain,
@@ -227,6 +250,7 @@ extern int iommu_group_unregister_notifier(struct iommu_group *group,
                                           struct notifier_block *nb);
 extern int iommu_group_id(struct iommu_group *group);
 extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
+extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
 
 extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
                                 void *data);
@@ -332,6 +356,11 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
 {
 }
 
+static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
+{
+       return NULL;
+}
+
 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
                            phys_addr_t paddr, int gfp_order, int prot)
 {
@@ -373,6 +402,21 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
 {
 }
 
+static inline void iommu_get_dm_regions(struct device *dev,
+                                       struct list_head *list)
+{
+}
+
+static inline void iommu_put_dm_regions(struct device *dev,
+                                       struct list_head *list)
+{
+}
+
+static inline int iommu_request_dm_for_dev(struct device *dev)
+{
+       return -ENODEV;
+}
+
 static inline int iommu_attach_group(struct iommu_domain *domain,
                                     struct iommu_group *group)
 {
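
A minimal sketch of a consumer of the direct-mapping API declared above; the device pointer and logging are illustrative, and error handling is elided:

	/* sketch: enumerate a device's direct-mapped regions via the new API */
	static void show_dm_regions(struct device *dev)
	{
		struct iommu_dm_region *region;
		LIST_HEAD(dm_list);

		iommu_get_dm_regions(dev, &dm_list);
		list_for_each_entry(region, &dm_list, list)
			dev_info(dev, "direct map: %pa + %zu, prot 0x%x\n",
				 &region->start, region->length, region->prot);
		iommu_put_dm_regions(dev, &dm_list);
	}
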
index ddeaae6d2083b256b21b930f3eed8182510e26a8..b871ff9d81d7207333fa021e6a95cb6bdbcf34ac 100644 (file)
@@ -121,6 +121,8 @@ extern struct device_node *of_stdout;
 extern raw_spinlock_t devtree_lock;
 
 #ifdef CONFIG_OF
+void of_core_init(void);
+
 static inline bool is_of_node(struct fwnode_handle *fwnode)
 {
        return fwnode && fwnode->type == FWNODE_OF;
@@ -376,6 +378,10 @@ bool of_console_check(struct device_node *dn, char *name, int index);
 
 #else /* CONFIG_OF */
 
+static inline void of_core_init(void)
+{
+}
+
 static inline bool is_of_node(struct fwnode_handle *fwnode)
 {
        return false;
index 50e50095c8d172777c4ea2857435444385b81ece..84a1094496100906c1b89714f921451a00babb9f 100644 (file)
@@ -41,7 +41,12 @@ void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
 s64 __percpu_counter_sum(struct percpu_counter *fbc);
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+
+static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+       return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
+}
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -116,6 +121,12 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
                return 0;
 }
 
+static inline int
+__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
+{
+       return percpu_counter_compare(fbc, rhs);
+}
+
 static inline void
 percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
index 61992cf2e9771699ee06595c8fbb1bd39633018a..d8a82a89f35abd762e697f6f1be719a32a868cec 100644 (file)
@@ -92,8 +92,6 @@ struct hw_perf_event_extra {
        int             idx;    /* index in shared_regs->regs[] */
 };
 
-struct event_constraint;
-
 /**
  * struct hw_perf_event - performance event hardware details:
  */
@@ -112,8 +110,6 @@ struct hw_perf_event {
 
                        struct hw_perf_event_extra extra_reg;
                        struct hw_perf_event_extra branch_reg;
-
-                       struct event_constraint *constraint;
                };
                struct { /* software */
                        struct hrtimer  hrtimer;
index 497bc14cdb85f4f8b33da8fbe7aba9d491df50e2..0320bbb7d7b5a1987e7b85f6842cf9fa145d91c5 100644 (file)
@@ -98,7 +98,8 @@ struct inet_connection_sock {
        const struct tcp_congestion_ops *icsk_ca_ops;
        const struct inet_connection_sock_af_ops *icsk_af_ops;
        unsigned int              (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
-       __u8                      icsk_ca_state:7,
+       __u8                      icsk_ca_state:6,
+                                 icsk_ca_setsockopt:1,
                                  icsk_ca_dst_locked:1;
        __u8                      icsk_retransmits;
        __u8                      icsk_pending;
index 8e3668b44c2984aeb3531d14927dd2bf6b9f88a3..fc57f6b82fc59e4dc6ae72b802856ab80e0af6e9 100644 (file)
@@ -354,7 +354,7 @@ enum ieee80211_rssi_event_data {
 };
 
 /**
- * enum ieee80211_rssi_event - data attached to an %RSSI_EVENT
+ * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT
  * @data: See &enum ieee80211_rssi_event_data
  */
 struct ieee80211_rssi_event {
@@ -388,7 +388,7 @@ enum ieee80211_mlme_event_status {
 };
 
 /**
- * enum ieee80211_mlme_event - data attached to an %MLME_EVENT
+ * struct ieee80211_mlme_event - data attached to an %MLME_EVENT
  * @data: See &enum ieee80211_mlme_event_data
  * @status: See &enum ieee80211_mlme_event_status
  * @reason: the reason code if applicable
@@ -401,9 +401,10 @@ struct ieee80211_mlme_event {
 
 /**
  * struct ieee80211_event - event to be sent to the driver
- * @type The event itself. See &enum ieee80211_event_type.
+ * @type: The event itself. See &enum ieee80211_event_type.
  * @rssi: relevant if &type is %RSSI_EVENT
  * @mlme: relevant if &type is %AUTH_EVENT
+ * @u:    union holding the above two fields
  */
 struct ieee80211_event {
        enum ieee80211_event_type type;
index c56a438c3a1eaf89d630edb17dd20802f0e01590..ce13cf20f6253e866f52534b7e7dc10e5bac1a0e 100644 (file)
@@ -574,11 +574,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr)
 /* Map v4 address to v4-mapped v6 address */
 static inline void sctp_v4_map_v6(union sctp_addr *addr)
 {
+       __be16 port;
+
+       port = addr->v4.sin_port;
+       addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+       addr->v6.sin6_port = port;
        addr->v6.sin6_family = AF_INET6;
        addr->v6.sin6_flowinfo = 0;
        addr->v6.sin6_scope_id = 0;
-       addr->v6.sin6_port = addr->v4.sin_port;
-       addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
        addr->v6.sin6_addr.s6_addr32[0] = 0;
        addr->v6.sin6_addr.s6_addr32[1] = 0;
        addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
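
The reordering above is about union aliasing: in union sctp_addr the v4 and v6 layouts overlap, and storing sin6_flowinfo lands on the same bytes as the v4 sin_addr, so the v4 fields must be read (or saved, as with the port here) before any v6 field is written. A toy demonstration whose layout mirrors the overlapping sockaddr_in/sockaddr_in6 prefixes:

	#include <stdio.h>
	#include <string.h>

	union addr {
		struct { unsigned short family, port; unsigned int s_addr; } v4;
		struct { unsigned short family, port; unsigned int flowinfo;
			 unsigned int addr32[4]; } v6;
	};

	int main(void)
	{
		union addr a;

		memset(&a, 0, sizeof(a));
		a.v4.s_addr = 0x7f000001;	/* 127.0.0.1 */

		a.v6.flowinfo = 0;		/* aliases v4.s_addr: clobbers it */
		a.v6.addr32[3] = a.v4.s_addr;	/* too late: reads back zero */
		printf("mapped = %#x\n", a.v6.addr32[3]);	/* prints 0 */
		return 0;
	}
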
index 53a18b3635e24a458700a21566c56a10d0704c56..df705908480aebbf754900731834162fa8097f75 100644 (file)
@@ -9,6 +9,8 @@
 #include <sound/core.h>
 #include <sound/hdaudio.h>
 
+#define AC_AMP_FAKE_MUTE       0x10    /* fake mute bit set to amp verbs */
+
 int snd_hdac_regmap_init(struct hdac_device *codec);
 void snd_hdac_regmap_exit(struct hdac_device *codec);
 int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
index d61be7297b2c88c867acbacadcacfd65045e6c50..5f122570699339f5f5cf85fb14a90e2ad9fc51b7 100644 (file)
@@ -1,9 +1,7 @@
 #ifndef TARGET_CORE_BACKEND_H
 #define TARGET_CORE_BACKEND_H
 
-#define TRANSPORT_PLUGIN_PHBA_PDEV             1
-#define TRANSPORT_PLUGIN_VHBA_PDEV             2
-#define TRANSPORT_PLUGIN_VHBA_VDEV             3
+#define TRANSPORT_FLAG_PASSTHROUGH             1
 
 struct target_backend_cits {
        struct config_item_type tb_dev_cit;
@@ -22,7 +20,7 @@ struct se_subsystem_api {
        char inquiry_rev[4];
        struct module *owner;
 
-       u8 transport_type;
+       u8 transport_flags;
 
        int (*attach_hba)(struct se_hba *, u32);
        void (*detach_hba)(struct se_hba *);
@@ -138,5 +136,7 @@ int se_dev_set_queue_depth(struct se_device *, u32);
 int    se_dev_set_max_sectors(struct se_device *, u32);
 int    se_dev_set_optimal_sectors(struct se_device *, u32);
 int    se_dev_set_block_size(struct se_device *, u32);
+sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
+       sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
 
 #endif /* TARGET_CORE_BACKEND_H */
index 25bb04c4209ed5c42e82bc54d5ba3996194c4bb2..b99c01170392abff3978b6c96d7c1763f06bcc27 100644 (file)
@@ -40,8 +40,6 @@ struct target_fabric_configfs {
        struct config_item      *tf_fabric;
        /* Passed from fabric modules */
        struct config_item_type *tf_fabric_cit;
-       /* Pointer to target core subsystem */
-       struct configfs_subsystem *tf_subsys;
        /* Pointer to fabric's struct module */
        struct module *tf_module;
        struct target_core_fabric_ops tf_ops;
index 17c7f5ac7ea0f5066c6b7f2bae0d66b0021f6358..0f4dc3768587bc2d41370d015c69502757322324 100644 (file)
@@ -4,7 +4,6 @@
 struct target_core_fabric_ops {
        struct module *module;
        const char *name;
-       struct configfs_subsystem *tf_subsys;
        char *(*get_fabric_name)(void);
        u8 (*get_fabric_proto_ident)(struct se_portal_group *);
        char *(*tpg_get_wwn)(struct se_portal_group *);
@@ -109,6 +108,9 @@ struct target_core_fabric_ops {
 int target_register_template(const struct target_core_fabric_ops *fo);
 void target_unregister_template(const struct target_core_fabric_ops *fo);
 
+int target_depend_item(struct config_item *item);
+void target_undepend_item(struct config_item *item);
+
 struct se_session *transport_init_session(enum target_prot_op);
 int transport_alloc_session_tags(struct se_session *, unsigned int,
                unsigned int);
index 81ea598121173bf782c04f7c6bfae63b5207b75e..f7554fd7fc62b92d6b1d73d7db6030599b552014 100644 (file)
@@ -140,19 +140,42 @@ DEFINE_EVENT(kmem_free, kfree,
        TP_ARGS(call_site, ptr)
 );
 
-DEFINE_EVENT(kmem_free, kmem_cache_free,
+DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free,
 
        TP_PROTO(unsigned long call_site, const void *ptr),
 
-       TP_ARGS(call_site, ptr)
+       TP_ARGS(call_site, ptr),
+
+       /*
+        * This trace can be potentially called from an offlined cpu.
+        * Since trace points use RCU and RCU should not be used from
+        * offline cpus, filter such calls out.
+        * While this trace can be called from a preemptable section,
+        * it has no impact on the condition since tasks can migrate
+        * only from online cpus to other online cpus. Thus it's safe
+        * to use raw_smp_processor_id.
+        */
+       TP_CONDITION(cpu_online(raw_smp_processor_id()))
 );
 
-TRACE_EVENT(mm_page_free,
+TRACE_EVENT_CONDITION(mm_page_free,
 
        TP_PROTO(struct page *page, unsigned int order),
 
        TP_ARGS(page, order),
 
+
+       /*
+        * This trace can be potentially called from an offlined cpu.
+        * Since trace points use RCU and RCU should not be used from
+        * offline cpus, filter such calls out.
+        * While this trace can be called from a preemptable section,
+        * it has no impact on the condition since tasks can migrate
+        * only from online cpus to other online cpus. Thus it's safe
+        * to use raw_smp_processor_id.
+        */
+       TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
        TP_STRUCT__entry(
                __field(        unsigned long,  pfn             )
                __field(        unsigned int,   order           )
@@ -253,12 +276,35 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
        TP_ARGS(page, order, migratetype)
 );
 
-DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
+TRACE_EVENT_CONDITION(mm_page_pcpu_drain,
 
        TP_PROTO(struct page *page, unsigned int order, int migratetype),
 
        TP_ARGS(page, order, migratetype),
 
+       /*
+        * This trace can be potentially called from an offlined cpu.
+        * Since trace points use RCU and RCU should not be used from
+        * offline cpus, filter such calls out.
+        * While this trace can be called from a preemptable section,
+        * only from online cpus to other online cpus. Thus it's safe
+        * only from online cpus to other online cpus. Thus its safe
+        * to use raw_smp_processor_id.
+        */
+       TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  pfn             )
+               __field(        unsigned int,   order           )
+               __field(        int,            migratetype     )
+       ),
+
+       TP_fast_assign(
+               __entry->pfn            = page ? page_to_pfn(page) : -1UL;
+               __entry->order          = order;
+               __entry->migratetype    = migratetype;
+       ),
+
        TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
                pfn_to_page(__entry->pfn), __entry->pfn,
                __entry->order, __entry->migratetype)
index 880dd74371729939a0179ef3dfd487e1fa017838..c178d13d6f4c0cb51d441c59e7b4975a1913ed3e 100644 (file)
@@ -250,7 +250,6 @@ DEFINE_EVENT(writeback_class, name, \
 DEFINE_WRITEBACK_EVENT(writeback_nowork);
 DEFINE_WRITEBACK_EVENT(writeback_wake_background);
 DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
-DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
 
 DECLARE_EVENT_CLASS(wbc_class,
        TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
index 984169a819ee4c8f2163c6a5c12d24fbf273caec..d7f1cbc3766c799ac514e4ab2710cb5cbb22a0c0 100644 (file)
@@ -26,6 +26,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE. */
 #include <linux/types.h>
+#include <linux/virtio_types.h>
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 
index 24f00610c575fd5d34c40dcf9fad35b358596c22..333d364be29d9e6c8b209d9eaded9d28552a36d7 100644 (file)
@@ -912,7 +912,8 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
                         * bitmap. We must however ensure the end of the
                         * kernel bitmap is zeroed.
                         */
-                       if (nr_compat_longs-- > 0) {
+                       if (nr_compat_longs) {
+                               nr_compat_longs--;
                                if (__get_user(um, umask))
                                        return -EFAULT;
                        } else {
@@ -954,7 +955,8 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
                         * We dont want to write past the end of the userspace
                         * bitmap.
                         */
-                       if (nr_compat_longs-- > 0) {
+                       if (nr_compat_longs) {
+                               nr_compat_longs--;
                                if (__put_user(um, umask))
                                        return -EFAULT;
                        }
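
The split test above sidesteps a C subtlety: in `if (nr_compat_longs-- > 0)` the decrement takes effect whether or not the comparison succeeds, so once the counter reaches zero it keeps sliding negative on every remaining loop pass. A two-line demonstration:

	#include <stdio.h>

	int main(void)
	{
		int n = 0;

		if (n-- > 0)		/* test fails, decrement happens anyway */
			puts("taken");
		printf("n = %d\n", n);	/* prints n = -1 */
		return 0;
	}
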
index 1a3bf48743ce1c62c26077d642084cbdc8b40d6b..eddf1ed4155eaa5b4bc9c5321d99596a1c70d751 100644 (file)
@@ -3442,7 +3442,6 @@ static void free_event_rcu(struct rcu_head *head)
        if (event->ns)
                put_pid_ns(event->ns);
        perf_event_free_filter(event);
-       perf_event_free_bpf_prog(event);
        kfree(event);
 }
 
@@ -3573,6 +3572,8 @@ static void __free_event(struct perf_event *event)
                        put_callchain_buffers();
        }
 
+       perf_event_free_bpf_prog(event);
+
        if (event->destroy)
                event->destroy(event);
 
index 232f00f273cbe419d2738d5f83465dd96529ee17..725c416085e318aa6c21633dcd102cc4864940cf 100644 (file)
@@ -493,6 +493,20 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                        rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
        }
 
+       /*
+        * In overwrite mode, PMUs that don't support SG may not handle more
+        * than one contiguous allocation, since they rely on PMI to do double
+        * buffering. In this case, the entire buffer has to be one contiguous
+        * chunk.
+        */
+       if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
+           overwrite) {
+               struct page *page = virt_to_page(rb->aux_pages[0]);
+
+               if (page_private(page) != max_order)
+                       goto out;
+       }
+
        rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
                                             overwrite);
        if (!rb->aux_priv)
index a0831e1b99f4aabd6c80ea68cedb6350a7a9affc..aaeae885d9af7d1dc190c566d95e0d6bed845cfe 100644 (file)
@@ -3900,7 +3900,8 @@ static void zap_class(struct lock_class *class)
        list_del_rcu(&class->hash_entry);
        list_del_rcu(&class->lock_entry);
 
-       class->key = NULL;
+       RCU_INIT_POINTER(class->key, NULL);
+       RCU_INIT_POINTER(class->name, NULL);
 }
 
 static inline int within(const void *addr, void *start, unsigned long size)
index ef43ac4bafb59b83ab979a680d49d6077749f955..d83d798bef95a042e1060a35bf4b79e7c7a6c05c 100644 (file)
@@ -426,10 +426,12 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
 
 static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 {
-       char name[39];
-       struct lock_class *class;
+       struct lockdep_subclass_key *ckey;
        struct lock_class_stats *stats;
+       struct lock_class *class;
+       const char *cname;
        int i, namelen;
+       char name[39];
 
        class = data->class;
        stats = &data->stats;
@@ -440,15 +442,25 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
        if (class->subclass)
                namelen -= 2;
 
-       if (!class->name) {
+       rcu_read_lock_sched();
+       cname = rcu_dereference_sched(class->name);
+       ckey  = rcu_dereference_sched(class->key);
+
+       if (!cname && !ckey) {
+               rcu_read_unlock_sched();
+               return;
+
+       } else if (!cname) {
                char str[KSYM_NAME_LEN];
                const char *key_name;
 
-               key_name = __get_key_name(class->key, str);
+               key_name = __get_key_name(ckey, str);
                snprintf(name, namelen, "%s", key_name);
        } else {
-               snprintf(name, namelen, "%s", class->name);
+               snprintf(name, namelen, "%s", cname);
        }
+       rcu_read_unlock_sched();
+
        namelen = strlen(name);
        if (class->name_version > 1) {
                snprintf(name+namelen, 3, "#%d", class->name_version);
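
Paired with the zap_class() change further up, this is the standard RCU retire/read handshake: the writer publishes NULL through RCU_INIT_POINTER() before freeing, and the reader samples the pointers inside one rcu_read_lock_sched() section and bails out when both are gone. A condensed sketch of the pairing, with the __rcu field annotations elided:

	/* writer side: mark the class dead; freeing waits for a grace period */
	static void retire_class(struct lock_class *class)
	{
		RCU_INIT_POINTER(class->name, NULL);
		RCU_INIT_POINTER(class->key, NULL);
	}

	/* reader side: sample once under rcu_read_lock_sched() */
	static void print_class(struct lock_class *class)
	{
		const char *cname;

		rcu_read_lock_sched();
		cname = rcu_dereference_sched(class->name);
		if (cname)
			pr_info("%s\n", cname);
		rcu_read_unlock_sched();
	}
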
index 42a1d2afb2173cd3c7c740098dd72d7a52bdb3f8..cfc9e843a924091e2be3d2a2bcef72a038737f64 100644 (file)
@@ -3370,6 +3370,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
        module_bug_cleanup(mod);
        mutex_unlock(&module_mutex);
 
+       blocking_notifier_call_chain(&module_notify_list,
+                                    MODULE_STATE_GOING, mod);
+
        /* we can't deallocate the module until we clear memory protection */
        unset_module_init_ro_nx(mod);
        unset_module_core_ro_nx(mod);
index ffeaa4105e48a36105ecaea8967082e1e7a7af98..c2980e8733bcb9da333f5d06e85441a78f1097a8 100644 (file)
@@ -2181,7 +2181,7 @@ void task_numa_work(struct callback_head *work)
        }
        for (; vma; vma = vma->vm_next) {
                if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
-                       is_vm_hugetlb_page(vma)) {
+                       is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
                        continue;
                }
 
index 13d945c0d03f2bda5802971484b21bbe9f65301f..1b28df2d91042de97566454a80dcb36d24674a49 100644 (file)
@@ -450,7 +450,7 @@ static int __init ring_buffer_benchmark_init(void)
 
        if (producer_fifo >= 0) {
                struct sched_param param = {
-                       .sched_priority = consumer_fifo
+                       .sched_priority = producer_fifo
                };
                sched_setscheduler(producer, SCHED_FIFO, &param);
        } else
index 830dd5dec40f1697b2bf2e40a294e05284ded89c..5f627084f2e998b2605016c311411d91f7016918 100644 (file)
@@ -139,64 +139,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 #endif
 
 /**
- * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
- *
+ * cpumask_local_spread - select the i'th cpu with local numa cpu's first
  * @i: index number
- * @numa_node: local numa_node
- * @dstp: cpumask with the relevant cpu bit set according to the policy
+ * @node: local numa_node
  *
- * This function sets the cpumask according to a numa aware policy.
- * cpumask could be used as an affinity hint for the IRQ related to a
- * queue. When the policy is to spread queues across cores - local cores
- * first.
+ * This function selects an online CPU according to a numa aware policy;
+ * local cpus are returned first, followed by non-local ones, then it
+ * wraps around.
  *
- * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
- * the cpu bit and need to re-call the function.
+ * It's not very efficient, but useful for setup.
  */
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-       cpumask_var_t mask;
        int cpu;
-       int ret = 0;
-
-       if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
-               return -ENOMEM;
 
+       /* Wrap: we always want a cpu. */
        i %= num_online_cpus();
 
-       if (numa_node == -1 || !cpumask_of_node(numa_node)) {
-               /* Use all online cpu's for non numa aware system */
-               cpumask_copy(mask, cpu_online_mask);
+       if (node == -1) {
+               for_each_cpu(cpu, cpu_online_mask)
+                       if (i-- == 0)
+                               return cpu;
        } else {
-               int n;
-
-               cpumask_and(mask,
-                           cpumask_of_node(numa_node), cpu_online_mask);
-
-               n = cpumask_weight(mask);
-               if (i >= n) {
-                       i -= n;
-
-                       /* If index > number of local cpu's, mask out local
-                        * cpu's
-                        */
-                       cpumask_andnot(mask, cpu_online_mask, mask);
+               /* NUMA first. */
+               for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+                       if (i-- == 0)
+                               return cpu;
+
+               for_each_cpu(cpu, cpu_online_mask) {
+                       /* Skip NUMA nodes, done above. */
+                       if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+                               continue;
+
+                       if (i-- == 0)
+                               return cpu;
                }
        }
-
-       for_each_cpu(cpu, mask) {
-               if (--i < 0)
-                       goto out;
-       }
-
-       ret = -EAGAIN;
-
-out:
-       free_cpumask_var(mask);
-
-       if (!ret)
-               cpumask_set_cpu(cpu, dstp);
-
-       return ret;
+       BUG();
 }
-EXPORT_SYMBOL(cpumask_set_cpu_local_first);
+EXPORT_SYMBOL(cpumask_local_spread);
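
A plausible use of the new helper, assigning NUMA-local CPUs to per-queue interrupts; the driver structure and irq array here are hypothetical:

	/* sketch: NUMA-aware affinity hints for n queues */
	static void spread_queue_irqs(struct my_dev *md, int nqueues)
	{
		int q;

		for (q = 0; q < nqueues; q++) {
			unsigned int cpu = cpumask_local_spread(q, md->numa_node);

			irq_set_affinity_hint(md->irq[q], cpumask_of(cpu));
		}
	}
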
index aac511417ad19af5d9e3472747a983be5ed3ee4b..a89d041592c8bfa7b092c382962a4085560f5b1a 100644 (file)
@@ -639,7 +639,7 @@ do { \
        **************  MIPS  *****************
        ***************************************/
 #if defined(__mips__) && W_TYPE_SIZE == 32
-#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
 #define umul_ppmm(w1, w0, u, v)                        \
 do {                                           \
        UDItype __ll = (UDItype)(u) * (v);      \
@@ -671,7 +671,7 @@ do {                                                \
        **************  MIPS/64  **************
        ***************************************/
 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
 #define umul_ppmm(w1, w0, u, v) \
 do {                                                                   \
        typedef unsigned int __ll_UTItype __attribute__((mode(TI)));    \
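
The old test "__GNUC__ >= 4 && __GNUC_MINOR__ >= 4" silently fails on GCC 5.x, where the minor number resets: for 5.1 it evaluates (5 >= 4 && 1 >= 4), i.e. false, disabling the fast path on a newer compiler. A sketch of the single-integer encoding commonly used to avoid this class of bug (the macro names are illustrative, not part of this patch):

/* Encode "major.minor" as one comparable number, e.g. 4.4 -> 40400. */
#define EXAMPLE_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)

#if EXAMPLE_GCC_VERSION >= 40400        /* holds for 4.4+ and any 5.x */
#define HAVE_FAST_UMUL_PPMM 1
#endif
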
index 48144cdae819017e8a9c0a89aae976359d87121b..f051d69f0910a65be2dbce9799736e2ba4eee2c7 100644 (file)
@@ -197,13 +197,13 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
  * Compare counter against given value.
  * Return 1 if greater, 0 if equal and -1 if less
  */
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
 {
        s64     count;
 
        count = percpu_counter_read(fbc);
        /* Check to see if rough count will be sufficient for comparison */
-       if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
+       if (abs(count - rhs) > (batch * num_online_cpus())) {
                if (count > rhs)
                        return 1;
                else
@@ -218,7 +218,7 @@ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
        else
                return 0;
 }
-EXPORT_SYMBOL(percpu_counter_compare);
+EXPORT_SYMBOL(__percpu_counter_compare);
 
 static int __init percpu_counter_startup(void)
 {
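
With the compare routine renamed and given a batch parameter, the old entry point presumably survives as a thin header wrapper that passes the default batch through (assumed counterpart, not shown in this hunk):

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
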
index 4396434e471536b4772ef06efcb983f87c580889..8609378e6505123a3688e0e95a18cdde013e278a 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/random.h>
 #include <linux/rhashtable.h>
 #include <linux/err.h>
+#include <linux/export.h>
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4U
index a28df5206d95c24d6f3b4116753747f1fb2a67e3..fe9a32591c2498b6266c2fc4753521fb469c876a 100644 (file)
@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
                        return res + find_zero(data) + 1 - align;
                }
                res += sizeof(unsigned long);
-               if (unlikely(max < sizeof(unsigned long)))
+               /* We already handled 'unsigned long' bytes. Did we do it all? */
+               if (unlikely(max <= sizeof(unsigned long)))
                        break;
                max -= sizeof(unsigned long);
                if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
@@ -89,8 +90,15 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
  * Get the size of a NUL-terminated string in user space.
  *
  * Returns the size of the string INCLUDING the terminating NUL.
- * If the string is too long, returns 'count+1'.
+ * If the string is too long, returns a number larger than @count. User
+ * has to check the return value against "> count".
  * On exception (or invalid count), returns 0.
+ *
+ * NOTE! You should basically never use this function. There is
+ * almost never any valid case for using the length of a user space
+ * string, since the string can be changed at any time by other
+ * threads. Use "strncpy_from_user()" instead to get a stable copy
+ * of the string.
  */
 long strnlen_user(const char __user *str, long count)
 {
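
The new kernel-doc steers callers toward strncpy_from_user(), since a user-space string can be rewritten by another thread between measuring and copying. A hedged sketch of the recommended pattern (buffer size and error codes are illustrative choices):

static long example_copy_name(const char __user *user_str)
{
        char buf[128];
        long len = strncpy_from_user(buf, user_str, sizeof(buf));

        if (len < 0)
                return len;             /* -EFAULT: unreadable pointer */
        if (len == sizeof(buf))
                return -ENAMETOOLONG;   /* no NUL within the buffer */
        /* buf now holds a stable, NUL-terminated copy of length len. */
        return len;
}
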
index 4abda074ea458947390b84c36f3eaad7095a2ceb..3c365ab6cf5f47711cf45ab65a593a6738713491 100644 (file)
@@ -537,8 +537,9 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 
-phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-                      enum dma_data_direction dir)
+static phys_addr_t
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+          enum dma_data_direction dir)
 {
        dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 
index 6dc4580df2af040b10bc10a5f9c423becc3ff47e..000e7b3b9896f2a9479687befd2442c43193614e 100644 (file)
@@ -359,23 +359,6 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
        flush_delayed_work(&bdi->wb.dwork);
 }
 
-/*
- * Called when the device behind @bdi has been removed or ejected.
- *
- * We can't really do much here except for reducing the dirty ratio at
- * the moment.  In the future we should be able to set a flag so that
- * the filesystem can handle errors at mark_inode_dirty time instead
- * of only at writeback time.
- */
-void bdi_unregister(struct backing_dev_info *bdi)
-{
-       if (WARN_ON_ONCE(!bdi->dev))
-               return;
-
-       bdi_set_min_ratio(bdi, 0);
-}
-EXPORT_SYMBOL(bdi_unregister);
-
 static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 {
        memset(wb, 0, sizeof(*wb));
@@ -443,6 +426,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
        int i;
 
        bdi_wb_shutdown(bdi);
+       bdi_set_min_ratio(bdi, 0);
 
        WARN_ON(!list_empty(&bdi->work_list));
        WARN_ON(delayed_work_pending(&bdi->wb.dwork));
index 14c2f2017e37cc405e52cb12bc30b128997f1f8e..a04225d372ba3ab77516b970c10135b19def3ac4 100644 (file)
@@ -2323,6 +2323,8 @@ done_restock:
        css_get_many(&memcg->css, batch);
        if (batch > nr_pages)
                refill_stock(memcg, batch - nr_pages);
+       if (!(gfp_mask & __GFP_WAIT))
+               goto done;
        /*
         * If the hierarchy is above the normal consumption range,
         * make the charging task trim their excess contribution.
@@ -5833,9 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        if (!mem_cgroup_is_root(memcg))
                page_counter_uncharge(&memcg->memory, 1);
 
-       /* XXX: caller holds IRQ-safe mapping->tree_lock */
-       VM_BUG_ON(!irqs_disabled());
-
+       /* Caller disabled preemption with mapping->tree_lock */
        mem_cgroup_charge_statistics(memcg, page, -1);
        memcg_check_events(memcg, page);
 }
index 457bde530cbedcf0dea2f35e219466de0acf204d..9e88f749aa512395daea45f2727545fa0f281533 100644 (file)
@@ -1969,8 +1969,10 @@ void try_offline_node(int nid)
                 * wait_table may be allocated from boot memory,
                 * here only free if it's allocated by vmalloc.
                 */
-               if (is_vmalloc_addr(zone->wait_table))
+               if (is_vmalloc_addr(zone->wait_table)) {
                        vfree(zone->wait_table);
+                       zone->wait_table = NULL;
+               }
        }
 }
 EXPORT_SYMBOL(try_offline_node);
index 08bd7a3d464a9c6959a39e269d2284600e750a50..a8b5e749e84e7dbd50d325eecf84a47316145598 100644 (file)
@@ -289,7 +289,8 @@ static int create_handle_cache(struct zs_pool *pool)
 
 static void destroy_handle_cache(struct zs_pool *pool)
 {
-       kmem_cache_destroy(pool->handle_cachep);
+       if (pool->handle_cachep)
+               kmem_cache_destroy(pool->handle_cachep);
 }
 
 static unsigned long alloc_handle(struct zs_pool *pool)
index e0670d7054f97c05d46b74952ee53d6fa6910776..659fb96672e41e2e6525323697ca23a41d271fbb 100644 (file)
@@ -796,9 +796,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
        int err = 0;
 
        if (ndm->ndm_flags & NTF_USE) {
+               local_bh_disable();
                rcu_read_lock();
                br_fdb_update(p->br, p, addr, vid, true);
                rcu_read_unlock();
+               local_bh_enable();
        } else {
                spin_lock_bh(&p->br->hash_lock);
                err = fdb_add_entry(p, addr, ndm->ndm_state,
index a3abe6ed111ec236dabc7b19f24c0373efb8f613..ff667e18b2d6313f0a806752a4ef88435e939c4d 100644 (file)
@@ -1167,6 +1167,9 @@ static void br_multicast_add_router(struct net_bridge *br,
        struct net_bridge_port *p;
        struct hlist_node *slot = NULL;
 
+       if (!hlist_unhashed(&port->rlist))
+               return;
+
        hlist_for_each_entry(p, &br->router_list, rlist) {
                if ((unsigned long) port >= (unsigned long) p)
                        break;
@@ -1194,12 +1197,8 @@ static void br_multicast_mark_router(struct net_bridge *br,
        if (port->multicast_router != 1)
                return;
 
-       if (!hlist_unhashed(&port->rlist))
-               goto timer;
-
        br_multicast_add_router(br, port);
 
-timer:
        mod_timer(&port->multicast_router_timer,
                  now + br->multicast_querier_interval);
 }
@@ -1822,7 +1821,7 @@ static void br_multicast_query_expired(struct net_bridge *br,
        if (query->startup_sent < br->multicast_startup_query_count)
                query->startup_sent++;
 
-       RCU_INIT_POINTER(querier, NULL);
+       RCU_INIT_POINTER(querier->port, NULL);
        br_multicast_send_query(br, NULL, query);
        spin_unlock(&br->multicast_lock);
 }
index 24c7c96bf5f8fc9a2929e6e8a1f18017f880bdd7..91180a7fc94376ea3ca7eecf274c03c3bc919590 100644 (file)
@@ -1117,8 +1117,6 @@ static int do_replace(struct net *net, const void __user *user,
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
                return -ENOMEM;
-       if (tmp.num_counters == 0)
-               return -EINVAL;
 
        tmp.name[sizeof(tmp.name) - 1] = 0;
 
@@ -2161,8 +2159,6 @@ static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
                return -ENOMEM;
-       if (tmp.num_counters == 0)
-               return -EINVAL;
 
        memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
 
index 4ec0c803aef112196657503cd615fe7a83e800bb..112ad784838a5bf6b46eed6c2b90f2d8b0e50d7a 100644 (file)
@@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
+
+               if (sock_flag(sk, SOCK_DEAD))
+                       break;
+
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }
 
@@ -373,6 +377,10 @@ static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                struct sk_buff *skb;
 
                lock_sock(sk);
+               if (sock_flag(sk, SOCK_DEAD)) {
+                       err = -ECONNRESET;
+                       goto unlock;
+               }
                skb = skb_dequeue(&sk->sk_receive_queue);
                caif_check_flow_release(sk);
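
This hunk, and the matching af_unix one near the end of the merge, close the same race: the wait loop drops the socket lock while sleeping, so the socket can be marked dead before the sleeper wakes. The guarded loop in condensed, illustrative form (not the full caif code):

        while (!skb_queue_len(&sk->sk_receive_queue) && timeo) {
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);

                if (sock_flag(sk, SOCK_DEAD))
                        break;  /* peer torn down while we slept */
        }
        finish_wait(sk_sleep(sk), &wait);
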
 
index 2c1c67fad64d57f3d744c89843816b2d64f5b834..aa82f9ab6a36d164769bf7c9633fcdfd5971466f 100644 (file)
@@ -1718,15 +1718,8 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
-       if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
-               if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
-                       atomic_long_inc(&dev->rx_dropped);
-                       kfree_skb(skb);
-                       return NET_RX_DROP;
-               }
-       }
-
-       if (unlikely(!is_skb_forwardable(dev, skb))) {
+       if (skb_orphan_frags(skb, GFP_ATOMIC) ||
+           unlikely(!is_skb_forwardable(dev, skb))) {
                atomic_long_inc(&dev->rx_dropped);
                kfree_skb(skb);
                return NET_RX_DROP;
index 1347e11f5cc9b0aa6ef8485cf44feaa1965a04e9..1d00b89229024b45fef3955cd27221fafe2bfb74 100644 (file)
@@ -359,15 +359,7 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
        int err;
        struct ethtool_cmd cmd;
 
-       if (!dev->ethtool_ops->get_settings)
-               return -EOPNOTSUPP;
-
-       if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
-               return -EFAULT;
-
-       cmd.cmd = ETHTOOL_GSET;
-
-       err = dev->ethtool_ops->get_settings(dev, &cmd);
+       err = __ethtool_get_settings(dev, &cmd);
        if (err < 0)
                return err;
 
index 3cfff2a3d651fb7d7cd2baaa3698c123eb7fc00f..41ec02242ea7c2ff57a6b506b685df22c62f3dcc 100644 (file)
@@ -4398,7 +4398,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
 
                while (order) {
                        if (npages >= 1 << order) {
-                               page = alloc_pages(gfp_mask |
+                               page = alloc_pages((gfp_mask & ~__GFP_WAIT) |
                                                   __GFP_COMP |
                                                   __GFP_NOWARN |
                                                   __GFP_NORETRY,
index 292f42228bfb361b5748998bbcc538b1e16a2f22..dc30dc5bb1b892923397fee073d42e9e5ef53a7e 100644 (file)
@@ -354,15 +354,12 @@ void sk_clear_memalloc(struct sock *sk)
 
        /*
         * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
-        * progress of swapping. However, if SOCK_MEMALLOC is cleared while
-        * it has rmem allocations there is a risk that the user of the
-        * socket cannot make forward progress due to exceeding the rmem
-        * limits. By rights, sk_clear_memalloc() should only be called
-        * on sockets being torn down but warn and reset the accounting if
-        * that assumption breaks.
+        * progress of swapping. SOCK_MEMALLOC may be cleared while
+        * it has rmem allocations due to the last swapfile being deactivated
+        * but there is a risk that the socket is unusable due to exceeding
+        * the rmem limits. Reclaim the reserves and obey rmem limits again.
         */
-       if (WARN_ON(sk->sk_forward_alloc))
-               sk_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 }
 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
 
@@ -1883,7 +1880,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
 
        pfrag->offset = 0;
        if (SKB_FRAG_PAGE_ORDER) {
-               pfrag->page = alloc_pages(gfp | __GFP_COMP |
+               pfrag->page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP |
                                          __GFP_NOWARN | __GFP_NORETRY,
                                          SKB_FRAG_PAGE_ORDER);
                if (likely(pfrag->page)) {
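
This hunk and the alloc_skb_with_frags() one above apply the same rule: the high-order attempt is opportunistic and must not enter direct reclaim; only the order-0 fallback may sleep. Condensed into one helper (illustrative; __GFP_WAIT still exists in this era's gfp API):

static struct page *example_frag_alloc(gfp_t gfp, unsigned int order)
{
        struct page *page;

        /* Try big first: no reclaim, no retries, no allocation warning. */
        page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP |
                           __GFP_NOWARN | __GFP_NORETRY, order);
        if (!page)
                page = alloc_pages(gfp, 0);     /* may sleep if gfp allows */
        return page;
}
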
index e6f6cc3a1bcf45ee6fa49d6cfe840f58660e3511..392e29a0227dbf4aa4870d73c5ef333db528b675 100644 (file)
@@ -359,7 +359,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
         */
        ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
        if (ds == NULL)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        ds->dst = dst;
        ds->index = index;
@@ -370,7 +370,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
 
        ret = dsa_switch_setup_one(ds, parent);
        if (ret)
-               return NULL;
+               return ERR_PTR(ret);
 
        return ds;
 }
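
Returning ERR_PTR() instead of NULL preserves the reason for failure, so callers switch from a NULL test to the IS_ERR()/PTR_ERR() idiom (sketch; the caller shape and argument list are assumed):

        ds = dsa_switch_setup(dst, i, parent, host_dev);
        if (IS_ERR(ds)) {
                err = PTR_ERR(ds);      /* -ENOMEM or the setup error */
                goto out;
        }
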
index 421a80b09b62358dad5a0fa35d99db73d28472a7..30b544f025acc09aaad99d9adc1e5dbc1227d307 100644 (file)
@@ -256,7 +256,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
        aead_givcrypt_set_assoc(req, asg, assoclen);
        aead_givcrypt_set_giv(req, esph->enc_data,
-                             XFRM_SKB_CB(skb)->seq.output.low);
+                             XFRM_SKB_CB(skb)->seq.output.low +
+                             ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
 
        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
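
With extended sequence numbers (ESN) the generated IV must seed from the full 64-bit counter, not just the low word; the xfrm_replay hunks near the end of this merge set seq.output.hi to 0 on the non-ESN paths so the same expression stays correct everywhere. The reassembly being added, in isolation:

        /* 64-bit ESN rebuilt from its two 32-bit halves. */
        u64 seq64 = XFRM_SKB_CB(skb)->seq.output.low +
                    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32);
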
index 9f7269f3c54af2ecbc74db4ec2c0f71d5184dc1c..0c152087ca15dd3f97548d3c7123d42bd6626f0e 100644 (file)
@@ -65,7 +65,6 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
                        goto drop;
 
                XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
-               skb->mark = be32_to_cpu(tunnel->parms.i_key);
 
                return xfrm_input(skb, nexthdr, spi, encap_type);
        }
@@ -91,6 +90,8 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
        struct pcpu_sw_netstats *tstats;
        struct xfrm_state *x;
        struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
+       u32 orig_mark = skb->mark;
+       int ret;
 
        if (!tunnel)
                return 1;
@@ -107,7 +108,11 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
        x = xfrm_input_state(skb);
        family = x->inner_mode->afinfo->family;
 
-       if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+       skb->mark = be32_to_cpu(tunnel->parms.i_key);
+       ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
+       skb->mark = orig_mark;
+
+       if (!ret)
                return -EPERM;
 
        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
@@ -216,8 +221,6 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
        memset(&fl, 0, sizeof(fl));
 
-       skb->mark = be32_to_cpu(tunnel->parms.o_key);
-
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                xfrm_decode_session(skb, &fl, AF_INET);
@@ -233,6 +236,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
+       /* override mark with tunnel output key */
+       fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
+
        return vti_xmit(skb, dev, &fl);
 }
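
Taken together, the three hunks above stop vti from clobbering skb->mark: on input the tunnel i_key is applied only around the policy check and then restored, while on output the o_key travels in the flow key instead of the skb. The net effect of the input side, condensed (illustrative):

        u32 orig_mark = skb->mark;
        int ret;

        skb->mark = be32_to_cpu(tunnel->parms.i_key);   /* key for the check */
        ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
        skb->mark = orig_mark;                          /* never leaks out */
        if (!ret)
                return -EPERM;

The xfrm_input hunk further down completes the picture by keying the state lookup on the tunnel i_key as well.
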
 
index 7a5ae50c80c87add1e46e8255f0837796d2e4947..84be008c945c654b692211b943f83e909a622516 100644 (file)
@@ -187,6 +187,7 @@ static void tcp_reinit_congestion_control(struct sock *sk,
 
        tcp_cleanup_congestion_control(sk);
        icsk->icsk_ca_ops = ca;
+       icsk->icsk_ca_setsockopt = 1;
 
        if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
                icsk->icsk_ca_ops->init(sk);
@@ -335,8 +336,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
        rcu_read_lock();
        ca = __tcp_ca_find_autoload(name);
        /* No change asking for existing value */
-       if (ca == icsk->icsk_ca_ops)
+       if (ca == icsk->icsk_ca_ops) {
+               icsk->icsk_ca_setsockopt = 1;
                goto out;
+       }
        if (!ca)
                err = -ENOENT;
        else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
index b5732a54f2ad60f5a528535f7754d644527d4d81..17e7339ee5cadd077769de396b7568a7ccb73e13 100644 (file)
@@ -420,7 +420,10 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
                rcu_read_unlock();
        }
 
-       if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
+       /* If no valid choice made yet, assign current system default ca. */
+       if (!ca_got_dst &&
+           (!icsk->icsk_ca_setsockopt ||
+            !try_module_get(icsk->icsk_ca_ops->owner)))
                tcp_assign_congestion_control(sk);
 
        tcp_set_ca_state(sk, TCP_CA_Open);
index d10b7e0112ebdb8fa61c650725ae7fae68f7e669..83aa604f9273c332c5a0e5399253d961ef92eb9a 100644 (file)
@@ -90,6 +90,7 @@
 #include <linux/socket.h>
 #include <linux/sockios.h>
 #include <linux/igmp.h>
+#include <linux/inetdevice.h>
 #include <linux/in.h>
 #include <linux/errno.h>
 #include <linux/timer.h>
@@ -1345,10 +1346,8 @@ csum_copy_err:
        }
        unlock_sock_fast(sk, slow);
 
-       if (noblock)
-               return -EAGAIN;
-
-       /* starting over for a new packet */
+       /* starting over for a new packet, but check if we need to yield */
+       cond_resched();
        msg->msg_flags &= ~MSG_TRUNC;
        goto try_again;
 }
@@ -1962,6 +1961,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
        struct sock *sk;
        struct dst_entry *dst;
        int dif = skb->dev->ifindex;
+       int ours;
 
        /* validate the packet */
        if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
@@ -1971,14 +1971,24 @@ void udp_v4_early_demux(struct sk_buff *skb)
        uh = udp_hdr(skb);
 
        if (skb->pkt_type == PACKET_BROADCAST ||
-           skb->pkt_type == PACKET_MULTICAST)
+           skb->pkt_type == PACKET_MULTICAST) {
+               struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
+
+               if (!in_dev)
+                       return;
+
+               ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
+                                      iph->protocol);
+               if (!ours)
+                       return;
                sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
                                                   uh->source, iph->saddr, dif);
-       else if (skb->pkt_type == PACKET_HOST)
+       } else if (skb->pkt_type == PACKET_HOST) {
                sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
                                             uh->source, iph->saddr, dif);
-       else
+       } else {
                return;
+       }
 
        if (!sk)
                return;
index d873ceea86e6c74c34e7fcd31bec41c78ce5720b..ca09bf49ac6806b399dba51399f84e47590cb9ed 100644 (file)
@@ -133,6 +133,14 @@ static void snmp6_free_dev(struct inet6_dev *idev)
        free_percpu(idev->stats.ipv6);
 }
 
+static void in6_dev_finish_destroy_rcu(struct rcu_head *head)
+{
+       struct inet6_dev *idev = container_of(head, struct inet6_dev, rcu);
+
+       snmp6_free_dev(idev);
+       kfree(idev);
+}
+
 /* Nobody refers to this device, we may destroy it. */
 
 void in6_dev_finish_destroy(struct inet6_dev *idev)
@@ -151,7 +159,6 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
                pr_warn("Freeing alive inet6 device %p\n", idev);
                return;
        }
-       snmp6_free_dev(idev);
-       kfree_rcu(idev, rcu);
+       call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu);
 }
 EXPORT_SYMBOL(in6_dev_finish_destroy);
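
kfree_rcu() can only kfree() the object itself after the grace period; this teardown must also release the per-cpu SNMP stats, which requires a real call_rcu() callback. (The MPLS hunk later in this merge goes the other way: a plain kfree() becomes kfree_rcu() precisely because nothing beyond the kfree is needed.) Generic form of the pattern, with illustrative types:

struct foo {
        void __percpu   *stats;
        struct rcu_head rcu;
};

static void foo_free_rcu(struct rcu_head *head)
{
        struct foo *f = container_of(head, struct foo, rcu);

        free_percpu(f->stats);  /* the extra step kfree_rcu() cannot do */
        kfree(f);
}

/* ...then, instead of kfree_rcu(f, rcu): */
call_rcu(&f->rcu, foo_free_rcu);
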
index 31f1b5d5e2ef8f7056eb8eddd513ba5b3343e2b1..7c07ce36aae2a5b9cc14cb5a883327b7230b38ee 100644 (file)
@@ -248,7 +248,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
        aead_givcrypt_set_assoc(req, asg, assoclen);
        aead_givcrypt_set_giv(req, esph->enc_data,
-                             XFRM_SKB_CB(skb)->seq.output.low);
+                             XFRM_SKB_CB(skb)->seq.output.low +
+                             ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
 
        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
index ed9d681207fa340881fd100db0ea1cb3eb9a2ffb..0224c032dca5dca98ea0146bcdf52c179fa23f6d 100644 (file)
@@ -322,7 +322,6 @@ static int vti6_rcv(struct sk_buff *skb)
                }
 
                XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
-               skb->mark = be32_to_cpu(t->parms.i_key);
 
                rcu_read_unlock();
 
@@ -342,6 +341,8 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
        struct pcpu_sw_netstats *tstats;
        struct xfrm_state *x;
        struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
+       u32 orig_mark = skb->mark;
+       int ret;
 
        if (!t)
                return 1;
@@ -358,7 +359,11 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
        x = xfrm_input_state(skb);
        family = x->inner_mode->afinfo->family;
 
-       if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+       skb->mark = be32_to_cpu(t->parms.i_key);
+       ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
+       skb->mark = orig_mark;
+
+       if (!ret)
                return -EPERM;
 
        skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev)));
@@ -430,6 +435,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        struct net_device *tdev;
        struct xfrm_state *x;
        int err = -1;
+       int mtu;
 
        if (!dst)
                goto tx_err_link_failure;
@@ -463,6 +469,19 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
 
+       mtu = dst_mtu(dst);
+       if (!skb->ignore_df && skb->len > mtu) {
+               skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
+
+               if (skb->protocol == htons(ETH_P_IPV6))
+                       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+               else
+                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                                 htonl(mtu));
+
+               return -EMSGSIZE;
+       }
+
        err = dst_output(skb);
        if (net_xmit_eval(err) == 0) {
                struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
@@ -495,7 +514,6 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        int ret;
 
        memset(&fl, 0, sizeof(fl));
-       skb->mark = be32_to_cpu(t->parms.o_key);
 
        switch (skb->protocol) {
        case htons(ETH_P_IPV6):
@@ -516,6 +534,9 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                goto tx_err;
        }
 
+       /* override mark with tunnel output key */
+       fl.flowi_mark = be32_to_cpu(t->parms.o_key);
+
        ret = vti6_xmit(skb, dev, &fl);
        if (ret < 0)
                goto tx_err;
index c2ec41617a35481d8b5b11dbae59884a0c16cbb3..e51fc3eee6dbd65506e8612fc5782b9482cf4708 100644 (file)
@@ -525,10 +525,8 @@ csum_copy_err:
        }
        unlock_sock_fast(sk, slow);
 
-       if (noblock)
-               return -EAGAIN;
-
-       /* starting over for a new packet */
+       /* starting over for a new packet, but check if we need to yield */
+       cond_resched();
        msg->msg_flags &= ~MSG_TRUNC;
        goto try_again;
 }
index 265e42721a661cf54a46246065168d6a17885147..ff347a0eebd4fdbcbd1580c8af0450c23f673f85 100644 (file)
@@ -2495,51 +2495,22 @@ static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local,
                                           struct ieee80211_roc_work *new_roc,
                                           struct ieee80211_roc_work *cur_roc)
 {
-       unsigned long j = jiffies;
-       unsigned long cur_roc_end = cur_roc->hw_start_time +
-                                   msecs_to_jiffies(cur_roc->duration);
-       struct ieee80211_roc_work *next_roc;
-       int new_dur;
+       unsigned long now = jiffies;
+       unsigned long remaining = cur_roc->hw_start_time +
+                                 msecs_to_jiffies(cur_roc->duration) -
+                                 now;
 
        if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun))
                return false;
 
-       if (time_after(j + IEEE80211_ROC_MIN_LEFT, cur_roc_end))
+       /* if it doesn't fit entirely, schedule a new one */
+       if (new_roc->duration > jiffies_to_msecs(remaining))
                return false;
 
        ieee80211_handle_roc_started(new_roc);
 
-       new_dur = new_roc->duration - jiffies_to_msecs(cur_roc_end - j);
-
-       /* cur_roc is long enough - add new_roc to the dependents list. */
-       if (new_dur <= 0) {
-               list_add_tail(&new_roc->list, &cur_roc->dependents);
-               return true;
-       }
-
-       new_roc->duration = new_dur;
-
-       /*
-        * if cur_roc was already coalesced before, we might
-        * want to extend the next roc instead of adding
-        * a new one.
-        */
-       next_roc = list_entry(cur_roc->list.next,
-                             struct ieee80211_roc_work, list);
-       if (&next_roc->list != &local->roc_list &&
-           next_roc->chan == new_roc->chan &&
-           next_roc->sdata == new_roc->sdata &&
-           !WARN_ON(next_roc->started)) {
-               list_add_tail(&new_roc->list, &next_roc->dependents);
-               next_roc->duration = max(next_roc->duration,
-                                        new_roc->duration);
-               next_roc->type = max(next_roc->type, new_roc->type);
-               return true;
-       }
-
-       /* add right after cur_roc */
-       list_add(&new_roc->list, &cur_roc->list);
-
+       /* add to dependents so we send the expired event properly */
+       list_add_tail(&new_roc->list, &cur_roc->dependents);
        return true;
 }
 
@@ -2652,17 +2623,9 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
                         * In the offloaded ROC case, if it hasn't begun, add
                         * this new one to the dependent list to be handled
                         * when the master one begins. If it has begun,
-                        * check that there's still a minimum time left and
-                        * if so, start this one, transmitting the frame, but
-                        * add it to the list directly after this one with
-                        * a reduced time so we'll ask the driver to execute
-                        * it right after finishing the previous one, in the
-                        * hope that it'll also be executed right afterwards,
-                        * effectively extending the old one.
-                        * If there's no minimum time left, just add it to the
-                        * normal list.
-                        * TODO: the ROC type is ignored here, assuming that it
-                        * is better to immediately use the current ROC.
+                        * check if it fits entirely within the existing one,
+                        * in which case it will just be dependent as well.
+                        * Otherwise, schedule it by itself.
                         */
                        if (!tmp->hw_begun) {
                                list_add_tail(&roc->list, &tmp->dependents);
index ab46ab4a72498fd04f1c12ac6bb44f867d86869b..c0a9187bc3a9d579b36824fa64ecbbcbd6575110 100644 (file)
@@ -205,6 +205,8 @@ enum ieee80211_packet_rx_flags {
  * @IEEE80211_RX_CMNTR: received on cooked monitor already
  * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
  *     to cfg80211_report_obss_beacon().
+ * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
+ *     reorder buffer timeout timer, not the normal RX path
  *
  * These flags are used across handling multiple interfaces
  * for a single frame.
@@ -212,6 +214,7 @@ enum ieee80211_packet_rx_flags {
 enum ieee80211_rx_flags {
        IEEE80211_RX_CMNTR              = BIT(0),
        IEEE80211_RX_BEACON_REPORTED    = BIT(1),
+       IEEE80211_RX_REORDER_TIMER      = BIT(2),
 };
 
 struct ieee80211_rx_data {
@@ -325,12 +328,6 @@ struct mesh_preq_queue {
        u8 flags;
 };
 
-#if HZ/100 == 0
-#define IEEE80211_ROC_MIN_LEFT 1
-#else
-#define IEEE80211_ROC_MIN_LEFT (HZ/100)
-#endif
-
 struct ieee80211_roc_work {
        struct list_head list;
        struct list_head dependents;
index bab5c63c0bad798529b3c5a964db995be4eef6b0..84cef600c5730e74c6456e801ffa93ef55e4e47f 100644 (file)
@@ -522,6 +522,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
                memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
                       sizeof(sdata->vif.hw_queue));
                sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
+
+               mutex_lock(&local->key_mtx);
+               sdata->crypto_tx_tailroom_needed_cnt +=
+                       master->crypto_tx_tailroom_needed_cnt;
+               mutex_unlock(&local->key_mtx);
+
                break;
                }
        case NL80211_IFTYPE_AP:
index 2291cd7300911514db84c0135369b807e93a9d06..a907f2d5c12d857bf1811af24e57f5af09eb8665 100644 (file)
@@ -58,6 +58,22 @@ static void assert_key_lock(struct ieee80211_local *local)
        lockdep_assert_held(&local->key_mtx);
 }
 
+static void
+update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
+{
+       struct ieee80211_sub_if_data *vlan;
+
+       if (sdata->vif.type != NL80211_IFTYPE_AP)
+               return;
+
+       mutex_lock(&sdata->local->mtx);
+
+       list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+               vlan->crypto_tx_tailroom_needed_cnt += delta;
+
+       mutex_unlock(&sdata->local->mtx);
+}
+
 static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
 {
        /*
@@ -79,6 +95,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
         * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
         */
 
+       update_vlan_tailroom_need_count(sdata, 1);
+
        if (!sdata->crypto_tx_tailroom_needed_cnt++) {
                /*
                 * Flush all XMIT packets currently using HW encryption or no
@@ -88,6 +106,15 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
        }
 }
 
+static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
+                                        int delta)
+{
+       WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
+
+       update_vlan_tailroom_need_count(sdata, -delta);
+       sdata->crypto_tx_tailroom_needed_cnt -= delta;
+}
+
 static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 {
        struct ieee80211_sub_if_data *sdata;
@@ -144,7 +171,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 
                if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
                      (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
-                       sdata->crypto_tx_tailroom_needed_cnt--;
+                       decrease_tailroom_need_count(sdata, 1);
 
                WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
                        (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
@@ -541,7 +568,7 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key,
                        schedule_delayed_work(&sdata->dec_tailroom_needed_wk,
                                              HZ/2);
                } else {
-                       sdata->crypto_tx_tailroom_needed_cnt--;
+                       decrease_tailroom_need_count(sdata, 1);
                }
        }
 
@@ -631,6 +658,7 @@ void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom)
 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_key *key;
+       struct ieee80211_sub_if_data *vlan;
 
        ASSERT_RTNL();
 
@@ -639,7 +667,14 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 
        mutex_lock(&sdata->local->key_mtx);
 
-       sdata->crypto_tx_tailroom_needed_cnt = 0;
+       WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+                    sdata->crypto_tx_tailroom_pending_dec);
+
+       if (sdata->vif.type == NL80211_IFTYPE_AP) {
+               list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+                       WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
+                                    vlan->crypto_tx_tailroom_pending_dec);
+       }
 
        list_for_each_entry(key, &sdata->key_list, list) {
                increment_tailroom_need_count(sdata);
@@ -649,6 +684,22 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
        mutex_unlock(&sdata->local->key_mtx);
 }
 
+void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_sub_if_data *vlan;
+
+       mutex_lock(&sdata->local->key_mtx);
+
+       sdata->crypto_tx_tailroom_needed_cnt = 0;
+
+       if (sdata->vif.type == NL80211_IFTYPE_AP) {
+               list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+                       vlan->crypto_tx_tailroom_needed_cnt = 0;
+       }
+
+       mutex_unlock(&sdata->local->key_mtx);
+}
+
 void ieee80211_iter_keys(struct ieee80211_hw *hw,
                         struct ieee80211_vif *vif,
                         void (*iter)(struct ieee80211_hw *hw,
@@ -688,8 +739,8 @@ static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_key *key, *tmp;
 
-       sdata->crypto_tx_tailroom_needed_cnt -=
-               sdata->crypto_tx_tailroom_pending_dec;
+       decrease_tailroom_need_count(sdata,
+                                    sdata->crypto_tx_tailroom_pending_dec);
        sdata->crypto_tx_tailroom_pending_dec = 0;
 
        ieee80211_debugfs_key_remove_mgmt_default(sdata);
@@ -709,6 +760,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_sub_if_data *vlan;
+       struct ieee80211_sub_if_data *master;
        struct ieee80211_key *key, *tmp;
        LIST_HEAD(keys);
 
@@ -728,8 +780,20 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
        list_for_each_entry_safe(key, tmp, &keys, list)
                __ieee80211_key_destroy(key, false);
 
-       WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
-                    sdata->crypto_tx_tailroom_pending_dec);
+       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+               if (sdata->bss) {
+                       master = container_of(sdata->bss,
+                                             struct ieee80211_sub_if_data,
+                                             u.ap);
+
+                       WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt !=
+                                    master->crypto_tx_tailroom_needed_cnt);
+               }
+       } else {
+               WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+                            sdata->crypto_tx_tailroom_pending_dec);
+       }
+
        if (sdata->vif.type == NL80211_IFTYPE_AP) {
                list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
                        WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
@@ -793,8 +857,8 @@ void ieee80211_delayed_tailroom_dec(struct work_struct *wk)
         */
 
        mutex_lock(&sdata->local->key_mtx);
-       sdata->crypto_tx_tailroom_needed_cnt -=
-               sdata->crypto_tx_tailroom_pending_dec;
+       decrease_tailroom_need_count(sdata,
+                                    sdata->crypto_tx_tailroom_pending_dec);
        sdata->crypto_tx_tailroom_pending_dec = 0;
        mutex_unlock(&sdata->local->key_mtx);
 }
index c5a31835be0e0ca22c154b1345d91be761308833..96557dd1e77dff325072cff12b7b671aad942015 100644 (file)
@@ -161,6 +161,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
 void ieee80211_free_sta_keys(struct ieee80211_local *local,
                             struct sta_info *sta);
 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
+void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata);
 
 #define key_mtx_dereference(local, ref) \
        rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
index 260eed45b6d2ff105052643169465c04d333c182..5793f75c5ffde91de02e9698bd27500ff4640826 100644 (file)
@@ -2121,7 +2121,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
                /* deliver to local stack */
                skb->protocol = eth_type_trans(skb, dev);
                memset(skb->cb, 0, sizeof(skb->cb));
-               if (rx->local->napi)
+               if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
+                   rx->local->napi)
                        napi_gro_receive(rx->local->napi, skb);
                else
                        netif_receive_skb(skb);
@@ -3231,7 +3232,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
                /* This is OK -- must be QoS data frame */
                .security_idx = tid,
                .seqno_idx = tid,
-               .flags = 0,
+               .flags = IEEE80211_RX_REORDER_TIMER,
        };
        struct tid_ampdu_rx *tid_agg_rx;
 
index 79412f16b61db9953a4a537db3bd5693d7c61cdb..b864ebc6ab8fbf2a09baca02e650e7fe0314cc75 100644 (file)
@@ -2022,6 +2022,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        mutex_unlock(&local->sta_mtx);
 
        /* add back keys */
+       list_for_each_entry(sdata, &local->interfaces, list)
+               ieee80211_reset_crypto_tx_tailroom(sdata);
+
        list_for_each_entry(sdata, &local->interfaces, list)
                if (ieee80211_sdata_running(sdata))
                        ieee80211_enable_keys(sdata);
index 7b3f732269e43bb33dc1a6584eaa91b74eab9b64..1f93a5978f2ad43fc81a16427e34d07ca2c0f34e 100644 (file)
@@ -541,7 +541,7 @@ static void mpls_ifdown(struct net_device *dev)
 
        RCU_INIT_POINTER(dev->mpls_ptr, NULL);
 
-       kfree(mdev);
+       kfree_rcu(mdev, rcu);
 }
 
 static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
@@ -564,6 +564,17 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
        case NETDEV_UNREGISTER:
                mpls_ifdown(dev);
                break;
+       case NETDEV_CHANGENAME:
+               mdev = mpls_dev_get(dev);
+               if (mdev) {
+                       int err;
+
+                       mpls_dev_sysctl_unregister(mdev);
+                       err = mpls_dev_sysctl_register(dev, mdev);
+                       if (err)
+                               return notifier_from_errno(err);
+               }
+               break;
        }
        return NOTIFY_OK;
 }
index b064c345042c17ccd9ec841535857fb29041a8a3..8cabeb5a1cb928c856c037c5994116df8547fb71 100644 (file)
@@ -16,6 +16,7 @@ struct mpls_dev {
        int                     input_enabled;
 
        struct ctl_table_header *sysctl;
+       struct rcu_head         rcu;
 };
 
 struct sk_buff;
index 4776282c64175209924740fbd87a56de8e05b609..33e6d6e2908f553516c5ca97c4b93abee7b7057b 100644 (file)
@@ -125,6 +125,7 @@ static struct vport *netdev_create(const struct vport_parms *parms)
        if (err)
                goto error_master_upper_dev_unlink;
 
+       dev_disable_lro(netdev_vport->dev);
        dev_set_promiscuity(netdev_vport->dev, 1);
        netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
        rtnl_unlock();
index ad9eed70bc8f8e16c3118c6527374a952823e2c0..1e1c89e51a118e79610c49412e335191fc3ba834 100644 (file)
@@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                if (dev->flags & IFF_UP)
                        dev_deactivate(dev);
 
-               if (new && new->ops->attach) {
-                       new->ops->attach(new);
-                       num_q = 0;
-               }
+               if (new && new->ops->attach)
+                       goto skip;
 
                for (i = 0; i < num_q; i++) {
                        struct netdev_queue *dev_queue = dev_ingress_queue(dev);
@@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                                qdisc_destroy(old);
                }
 
+skip:
                if (!ingress) {
                        notify_and_destroy(net, skb, n, classid,
                                           dev->qdisc, new);
                        if (new && !new->ops->attach)
                                atomic_inc(&new->refcnt);
                        dev->qdisc = new ? : &noop_qdisc;
+
+                       if (new && new->ops->attach)
+                               new->ops->attach(new);
                } else {
                        notify_and_destroy(net, skb, n, classid, old, new);
                }
index fb7976aee61c84f38aecdc5c5f0d8be20e577fa9..4f15b7d730e13d6aaa58ba7a28262c9831afea95 100644 (file)
@@ -381,13 +381,14 @@ nomem:
 }
 
 
-/* Public interface to creat the association shared key.
+/* Public interface to create the association shared key.
  * See code above for the algorithm.
  */
 int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
 {
        struct sctp_auth_bytes  *secret;
        struct sctp_shared_key *ep_key;
+       struct sctp_chunk *chunk;
 
        /* If we don't support AUTH, or peer is not capable
         * we don't need to do anything.
@@ -410,6 +411,14 @@ int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
        sctp_auth_key_put(asoc->asoc_shared_key);
        asoc->asoc_shared_key = secret;
 
+       /* Update the send queue in case any already-queued chunk now
+        * needs authenticating
+        */
+       list_for_each_entry(chunk, &asoc->outqueue.out_chunk_list, list) {
+               if (sctp_auth_send_cid(chunk->chunk_hdr->type, asoc))
+                       chunk->auth = 1;
+       }
+
        return 0;
 }
 
index 9074b5cede38b8edd75890b684a706d96b9f71ba..f485600c4507bc152cef654ae5667a03a52d990c 100644 (file)
@@ -2142,11 +2142,17 @@ static void tipc_sk_timeout(unsigned long data)
        peer_node = tsk_peer_node(tsk);
 
        if (tsk->probing_state == TIPC_CONN_PROBING) {
-               /* Previous probe not answered -> self abort */
-               skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
-                                     TIPC_CONN_MSG, SHORT_H_SIZE, 0,
-                                     own_node, peer_node, tsk->portid,
-                                     peer_port, TIPC_ERR_NO_PORT);
+               if (!sock_owned_by_user(sk)) {
+                       sk->sk_socket->state = SS_DISCONNECTING;
+                       tsk->connected = 0;
+                       tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
+                                             tsk_peer_port(tsk));
+                       sk->sk_state_change(sk);
+               } else {
+                       /* Try again later */
+                       sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
+               }
+
        } else {
                skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
                                      INT_H_SIZE, 0, peer_node, own_node,
index 5266ea7b922b76d1977dea57cc7c227594c49285..06430598cf512fdaff480671620e8fa69c259bb5 100644 (file)
@@ -1880,6 +1880,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
                unix_state_unlock(sk);
                timeo = freezable_schedule_timeout(timeo);
                unix_state_lock(sk);
+
+               if (sock_flag(sk, SOCK_DEAD))
+                       break;
+
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }
 
@@ -1939,6 +1943,10 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                struct sk_buff *skb, *last;
 
                unix_state_lock(sk);
+               if (sock_flag(sk, SOCK_DEAD)) {
+                       err = -ECONNRESET;
+                       goto unlock;
+               }
                last = skb = skb_peek(&sk->sk_receive_queue);
 again:
                if (skb == NULL) {
index fff1bef6ed6d916f9019a63d708652f4ab07cddf..fd682832a0e3635d52c734871d5402d270336dc3 100644 (file)
@@ -1333,6 +1333,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
        memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
        wdev_unlock(wdev);
 
+       memset(&sinfo, 0, sizeof(sinfo));
+
        if (rdev_get_station(rdev, dev, bssid, &sinfo))
                return NULL;
 
index 526c4feb3b50d723d24b8c55288c8c941257da52..b58286ecd156fdb9de2a33ca0ede0fe3194bf289 100644 (file)
@@ -13,6 +13,8 @@
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
+#include <net/ip_tunnels.h>
+#include <net/ip6_tunnel.h>
 
 static struct kmem_cache *secpath_cachep __read_mostly;
 
@@ -186,6 +188,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
        struct xfrm_state *x = NULL;
        xfrm_address_t *daddr;
        struct xfrm_mode *inner_mode;
+       u32 mark = skb->mark;
        unsigned int family;
        int decaps = 0;
        int async = 0;
@@ -203,6 +206,18 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                                   XFRM_SPI_SKB_CB(skb)->daddroff);
        family = XFRM_SPI_SKB_CB(skb)->family;
 
+       /* if a tunnel is present, override skb->mark with the tunnel i_key */
+       if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
+               switch (family) {
+               case AF_INET:
+                       mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
+                       break;
+               case AF_INET6:
+                       mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
+                       break;
+               }
+       }
+
        /* Allocate new secpath or COW existing one. */
        if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
                struct sec_path *sp;
@@ -229,7 +244,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                        goto drop;
                }
 
-               x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
+               x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
                if (x == NULL) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
                        xfrm_audit_state_notfound(skb, family, spi, seq);
index dab57daae40856030790fa8070068a59d82220af..4fd725a0c500ebf69a02e06fcf37ae3035ae0d98 100644 (file)
@@ -99,6 +99,7 @@ static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
 
        if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
                XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq;
+               XFRM_SKB_CB(skb)->seq.output.hi = 0;
                if (unlikely(x->replay.oseq == 0)) {
                        x->replay.oseq--;
                        xfrm_audit_state_replay_overflow(x, skb);
@@ -177,6 +178,7 @@ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
 
        if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
                XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
+               XFRM_SKB_CB(skb)->seq.output.hi = 0;
                if (unlikely(replay_esn->oseq == 0)) {
                        replay_esn->oseq--;
                        xfrm_audit_state_replay_overflow(x, skb);
index f5e39e35d73aa96c3551b0e46f9b26ab291d23aa..96688cd0f6f11bddee4451de1d09a9a8e5f212dd 100644 (file)
@@ -927,8 +927,8 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
                        x->id.spi != spi)
                        continue;
 
-               spin_unlock_bh(&net->xfrm.xfrm_state_lock);
                xfrm_state_hold(x);
+               spin_unlock_bh(&net->xfrm.xfrm_state_lock);
                return x;
        }
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
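
Taking the reference only after dropping xfrm_state_lock leaves a window where the state can be freed under us; the hold must happen while the lock still pins the object in the hash. The general shape of the rule, on a generic object (illustrative):

struct obj {
        struct hlist_node       node;
        atomic_t                refcnt;
        u32                     key;
};

static struct obj *obj_lookup_hold(struct hlist_head *head,
                                   spinlock_t *lock, u32 key)
{
        struct obj *o;

        spin_lock_bh(lock);
        hlist_for_each_entry(o, head, node) {
                if (o->key != key)
                        continue;
                atomic_inc(&o->refcnt); /* hold BEFORE unlocking */
                spin_unlock_bh(lock);
                return o;
        }
        spin_unlock_bh(lock);
        return NULL;
}
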
index 89b1df4e72ab3423bce45011fb03f86c193f5ad4..c5ec977b9c3786097b214e1c835efd8fa337c173 100755 (executable)
@@ -3169,12 +3169,12 @@ sub process {
                }
 
 # check for global initialisers.
-               if ($line =~ /^\+(\s*$Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/) {
+               if ($line =~ /^\+$Type\s*$Ident(?:\s+$Modifier)*\s*=\s*(?:0|NULL|false)\s*;/) {
                        if (ERROR("GLOBAL_INITIALISERS",
                                  "do not initialise globals to 0 or NULL\n" .
                                      $herecurr) &&
                            $fix) {
-                               $fixed[$fixlinenr] =~ s/($Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/$1;/;
+                               $fixed[$fixlinenr] =~ s/(^.$Type\s*$Ident(?:\s+$Modifier)*)\s*=\s*(0|NULL|false)\s*;/$1;/;
                        }
                }
 # check for static initialisers.
index a1504c4f19003d6d4a57971471b2d873f4e2bb09..25db8cff44a2036c0ecf1da69c3da5c0d567b782 100644 (file)
@@ -73,18 +73,11 @@ class LxLsmod(gdb.Command):
                 "        " if utils.get_long_type().sizeof == 8 else ""))
 
         for module in module_list():
-            ref = 0
-            module_refptr = module['refptr']
-            for cpu in cpus.cpu_list("cpu_possible_mask"):
-                refptr = cpus.per_cpu(module_refptr, cpu)
-                ref += refptr['incs']
-                ref -= refptr['decs']
-
             gdb.write("{address} {name:<19} {size:>8}  {ref}".format(
                 address=str(module['module_core']).split()[0],
                 name=module['name'].string(),
                 size=str(module['core_size']),
-                ref=str(ref)))
+                ref=str(module['refcnt']['counter'])))
 
             source_list = module['source_list']
             t = self._module_use_type.get_type().pointer()
index 7371e0c3926f32a9104b521d0bf70f1c35f0740f..1eabcdf69457311129b766ec237d37e402f640bc 100644 (file)
@@ -246,6 +246,9 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
                return hda_reg_read_stereo_amp(codec, reg, val);
        if (verb == AC_VERB_GET_PROC_COEF)
                return hda_reg_read_coef(codec, reg, val);
+       if ((verb & 0x700) == AC_VERB_SET_AMP_GAIN_MUTE)
+               reg &= ~AC_AMP_FAKE_MUTE;
+
        err = snd_hdac_exec_verb(codec, reg, 0, val);
        if (err < 0)
                return err;
@@ -265,6 +268,9 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
        unsigned int verb;
        int i, bytes, err;
 
+       if (codec->caps_overwriting)
+               return 0;
+
        reg &= ~0x00080000U; /* drop GET bit */
        reg |= (codec->addr << 28);
        verb = get_verb(reg);
@@ -280,6 +286,8 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
 
        switch (verb & 0xf00) {
        case AC_VERB_SET_AMP_GAIN_MUTE:
+               if ((reg & AC_AMP_FAKE_MUTE) && (val & AC_AMP_MUTE))
+                       val = 0;
                verb = AC_VERB_SET_AMP_GAIN_MUTE;
                if (reg & AC_AMP_GET_LEFT)
                        verb |= AC_AMP_SET_LEFT >> 8;
index b49feff0a31982e7c22071c08e8d088e91a97727..5645481af3d9571b8340c963a27c34e377c405c5 100644 (file)
@@ -436,7 +436,7 @@ static unsigned int get_num_devices(struct hda_codec *codec, hda_nid_t nid)
            get_wcaps_type(wcaps) != AC_WID_PIN)
                return 0;
 
-       parm = snd_hda_param_read(codec, nid, AC_PAR_DEVLIST_LEN);
+       parm = snd_hdac_read_parm_uncached(&codec->core, nid, AC_PAR_DEVLIST_LEN);
        if (parm == -1 && codec->bus->rirb_error)
                parm = 0;
        return parm & AC_DEV_LIST_LEN_MASK;
@@ -1375,6 +1375,31 @@ int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir,
 }
 EXPORT_SYMBOL_GPL(snd_hda_override_amp_caps);
 
+/**
+ * snd_hda_codec_amp_update - update the AMP mono value
+ * @codec: HD-audio codec
+ * @nid: NID to read the AMP value
+ * @ch: channel to update (0 or 1)
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ * @mask: bit mask to set
+ * @val: the bits value to set
+ *
+ * Update the AMP values for the given channel, direction and index.
+ */
+int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid,
+                            int ch, int dir, int idx, int mask, int val)
+{
+       unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx);
+
+       /* enable fake mute if no h/w mute but min=mute */
+       if ((query_amp_caps(codec, nid, dir) &
+            (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) == AC_AMPCAP_MIN_MUTE)
+               cmd |= AC_AMP_FAKE_MUTE;
+       return snd_hdac_regmap_update_raw(&codec->core, cmd, mask, val);
+}
+EXPORT_SYMBOL_GPL(snd_hda_codec_amp_update);
+
 /**
  * snd_hda_codec_amp_stereo - update the AMP stereo values
  * @codec: HD-audio codec
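
snd_hda_codec_amp_update() is promoted from a regmap macro (see the hda_local.h hunk below) to a real function so it can OR in AC_AMP_FAKE_MUTE whenever the amp capabilities advertise mute-at-minimum without a hardware mute bit; the regmap hunks above then translate the fake mute into writing value 0. A hypothetical call matching the documented parameters:

        /* Mute the left channel of the output amp on NID 0x02
         * (NID and channel are illustrative). */
        snd_hda_codec_amp_update(codec, 0x02, 0 /* left ch */, HDA_OUTPUT,
                                 0 /* idx */, HDA_AMP_MUTE, HDA_AMP_MUTE);
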
index 1c8678775f4078df5b747a0a259b8c6ef33fb38b..ac0db1679f098ee4ec08c6770fe1f1374c8bf431 100644 (file)
@@ -4926,9 +4926,12 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
  dig_only:
        parse_digital(codec);
 
-       if (spec->power_down_unused || codec->power_save_node)
+       if (spec->power_down_unused || codec->power_save_node) {
                if (!codec->power_filter)
                        codec->power_filter = snd_hda_gen_path_power_filter;
+               if (!codec->patch_ops.stream_pm)
+                       codec->patch_ops.stream_pm = snd_hda_gen_stream_pm;
+       }
 
        if (!spec->no_analog && spec->beep_nid) {
                err = snd_hda_attach_beep_device(codec, spec->beep_nid);
index 34040d26c94ff04c84e8ee1cb2115eb76b0e755d..a244ba706317379445f6b11a03ec118fc16eea05 100644 (file)
@@ -340,6 +340,11 @@ enum {
 #define use_vga_switcheroo(chip)       0
 #endif
 
+#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
+                                       ((pci)->device == 0x0c0c) || \
+                                       ((pci)->device == 0x0d0c) || \
+                                       ((pci)->device == 0x160c))
+
 static char *driver_short_names[] = {
        [AZX_DRIVER_ICH] = "HDA Intel",
        [AZX_DRIVER_PCH] = "HDA Intel PCH",
@@ -1854,8 +1859,17 @@ static int azx_probe_continue(struct azx *chip)
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
 #ifdef CONFIG_SND_HDA_I915
                err = hda_i915_init(hda);
-               if (err < 0)
-                       goto out_free;
+               if (err < 0) {
+                       /* if the controller is bound only with HDMI/DP
+                        * (for HSW and BDW), we need to abort the probe;
+                        * for other chips, still continue probing as other
+                        * codecs can be on the same link.
+                        */
+                       if (CONTROLLER_IN_GPU(pci))
+                               goto out_free;
+                       else
+                               goto skip_i915;
+               }
                err = hda_display_power(hda, true);
                if (err < 0) {
                        dev_err(chip->card->dev,
@@ -1865,6 +1879,7 @@ static int azx_probe_continue(struct azx *chip)
 #endif
        }
 
+ skip_i915:
        err = azx_first_init(chip);
        if (err < 0)
                goto out_free;
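
The CONTROLLER_IN_GPU() test above separates the HDA controller embedded in the Haswell/Broadwell GPU, which drives HDMI/DP codecs only and is useless without i915 power wells, from ordinary PCH controllers where other codecs can share the link. A standalone sketch of that decision, with the device ids copied from the macro (0x8c20 is just an illustrative non-GPU id):

#include <stdio.h>

/* Return 1 for the HSW/BDW GPU-internal HDA controllers: for these a
 * failed i915 binding is fatal; elsewhere the probe can skip i915. */
static int controller_in_gpu(unsigned short device)
{
        switch (device) {
        case 0x0a0c: case 0x0c0c: case 0x0d0c: case 0x160c:
                return 1;
        default:
                return 0;
        }
}

int main(void)
{
        printf("%d\n", controller_in_gpu(0x0a0c)); /* 1: abort probe */
        printf("%d\n", controller_in_gpu(0x8c20)); /* 0: skip i915, continue */
        return 0;
}
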
@@ -2089,6 +2104,8 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaab0),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaac8),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        /* VIA VT8251/VT8237A */
        { PCI_DEVICE(0x1106, 0x3288),
          .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
index 3b567f42296b9d6b2ca148c66c59c12b5628cb9e..bed66c3144318de3f82dcddeb2445a84ad9ce594 100644 (file)
@@ -129,8 +129,8 @@ int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol,
 /* lowlevel accessor with caching; use carefully */
 #define snd_hda_codec_amp_read(codec, nid, ch, dir, idx) \
        snd_hdac_regmap_get_amp(&(codec)->core, nid, ch, dir, idx)
-#define snd_hda_codec_amp_update(codec, nid, ch, dir, idx, mask, val) \
-       snd_hdac_regmap_update_amp(&(codec)->core, nid, ch, dir, idx, mask, val)
+int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid,
+                            int ch, int dir, int idx, int mask, int val);
 int snd_hda_codec_amp_stereo(struct hda_codec *codec, hda_nid_t nid,
                             int dir, int idx, int mask, int val);
 int snd_hda_codec_amp_init(struct hda_codec *codec, hda_nid_t nid, int ch,
index 31f8f13be907a298fe7b785c33acb56b36fa4d5c..0320cb523d9e68112d34f28b8f621c6c10cc6cc3 100644 (file)
@@ -884,6 +884,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
        { 0x10ec0275, 0x1028, 0, "ALC3260" },
        { 0x10ec0899, 0x1028, 0, "ALC3861" },
        { 0x10ec0298, 0x1028, 0, "ALC3266" },
+       { 0x10ec0256, 0x1028, 0, "ALC3246" },
        { 0x10ec0670, 0x1025, 0, "ALC669X" },
        { 0x10ec0676, 0x1025, 0, "ALC679X" },
        { 0x10ec0282, 0x1043, 0, "ALC3229" },
@@ -2167,6 +2168,7 @@ static const struct hda_fixup alc882_fixups[] = {
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD),
        SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
+       SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
        SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD),
        SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
        SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD),
@@ -4227,6 +4229,11 @@ static void alc_fixup_headset_mode_alc662(struct hda_codec *codec,
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
                spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
                spec->gen.hp_mic = 1; /* Mic-in is same pin as headphone */
+
+               /* Disable boost for mic-in permanently. (This code is only called
+                  from quirks that guarantee that the headphone is at NID 0x1b.) */
+               snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000);
+               snd_hda_override_wcaps(codec, 0x1b, get_wcaps(codec, 0x1b) & ~AC_WCAP_IN_AMP);
        } else
                alc_fixup_headset_mode(codec, fix, action);
 }
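
The raw 0x7000 payload written above is the SET_AMP_GAIN_MUTE verb with only the direction and channel bits set, in other words "input amp, both channels, index 0, unmuted, gain 0". A sketch of the payload layout, with bit positions as in the HDA specification (the defines here are illustrative stand-ins for the real ones in the driver headers):

/* AC_VERB_SET_AMP_GAIN_MUTE payload:
 *   bit 15: output amp    bit 14: input amp
 *   bit 13: left channel  bit 12: right channel
 *   bits 11:8: index      bit 7: mute      bits 6:0: gain
 */
#define AC_AMP_SET_INPUT        (1 << 14)
#define AC_AMP_SET_LEFT         (1 << 13)
#define AC_AMP_SET_RIGHT        (1 << 12)

int main(void)
{
        /* zero boost, no mute, applied to both input channels */
        unsigned int payload = AC_AMP_SET_INPUT | AC_AMP_SET_LEFT | AC_AMP_SET_RIGHT;

        return payload == 0x7000 ? 0 : 1;
}
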
@@ -5370,6 +5377,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x17, 0x40000000},
                {0x1d, 0x40700001},
                {0x21, 0x02211040}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC255_STANDARD_PINS,
+               {0x12, 0x90a60160},
+               {0x14, 0x90170120},
+               {0x17, 0x40000000},
+               {0x1d, 0x40700001},
+               {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS,
                {0x13, 0x40000000}),
index 43c99ce4a520c3fc24a720f6c76ba4222403ad3b..6833c74ed6ff47f60598d6d250b8709d914590a5 100644 (file)
@@ -4403,7 +4403,6 @@ static const struct hda_codec_ops stac_patch_ops = {
 #ifdef CONFIG_PM
        .suspend = stac_suspend,
 #endif
-       .stream_pm = snd_hda_gen_stream_pm,
        .reboot_notify = stac_shutup,
 };
 
@@ -4697,7 +4696,8 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
                return err;
 
        spec = codec->spec;
-       codec->power_save_node = 1;
+       /* disabled power_save_node since it causes noises on a Dell machine */
+       /* codec->power_save_node = 1; */
        spec->linear_tone_beep = 0;
        spec->gen.own_eapd_ctl = 1;
        spec->gen.power_down_unused = 1;
index 31a95cca015d4d1c34a1facff2e226b6821a5203..bab6c04932aa050ff63f054bf172acf288f5ee5e 100644 (file)
@@ -449,6 +449,15 @@ static int via_suspend(struct hda_codec *codec)
 
        return 0;
 }
+
+static int via_resume(struct hda_codec *codec)
+{
+       /* some delay here to make jack detection work (bko#98921) */
+       msleep(10);
+       codec->patch_ops.init(codec);
+       regcache_sync(codec->core.regmap);
+       return 0;
+}
 #endif
 
 #ifdef CONFIG_PM
@@ -475,6 +484,7 @@ static const struct hda_codec_ops via_patch_ops = {
        .stream_pm = snd_hda_gen_stream_pm,
 #ifdef CONFIG_PM
        .suspend = via_suspend,
+       .resume = via_resume,
        .check_power_status = via_check_power_status,
 #endif
 };
index d51703e305238700bce9da8184971ffa516247ca..0a4ad5feb82e7817f7036f86c973d02b1dfecb9b 100644 (file)
@@ -72,7 +72,6 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
                if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
                        old_vmaster_hook = spec->vmaster_mute.hook;
                        spec->vmaster_mute.hook = update_tpacpi_mute_led;
-                       spec->vmaster_mute_enum = 1;
                        removefunc = false;
                }
                if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
index 3e2ef61c627b831bfec65724cc7166db051f5099..8b7e391dd0b80193d49f8634bb69fa45814593f0 100644 (file)
@@ -918,6 +918,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
        case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
        case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
        case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
+       case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
        case USB_ID(0x046d, 0x0991):
        /* Most audio usb devices lie about volume resolution.
         * Most Logitech webcams have res = 384.
@@ -1582,12 +1583,6 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
                              unitid);
                return -EINVAL;
        }
-       /* no bmControls field (e.g. Maya44) -> ignore */
-       if (desc->bLength <= 10 + input_pins) {
-               usb_audio_dbg(state->chip, "MU %d has no bmControls field\n",
-                             unitid);
-               return 0;
-       }
 
        num_ins = 0;
        ich = 0;
@@ -1595,6 +1590,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
                err = parse_audio_unit(state, desc->baSourceID[pin]);
                if (err < 0)
                        continue;
+               /* no bmControls field (e.g. Maya44) -> ignore */
+               if (desc->bLength <= 10 + input_pins)
+                       continue;
                err = check_input_term(state, desc->baSourceID[pin], &iterm);
                if (err < 0)
                        return err;
index b703cb3cda1993402d60efc03e9e7d840cb68f72..e5000da9e9d7093f6e287194665de2d63f046e93 100644 (file)
@@ -436,6 +436,11 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .id = USB_ID(0x200c, 0x1018),
                .map = ebox44_map,
        },
+       {
+               /* MAYA44 USB+ */
+               .id = USB_ID(0x2573, 0x0008),
+               .map = maya44_map,
+       },
        {
                /* KEF X300A */
                .id = USB_ID(0x27ac, 0x1000),
index 46facfc9aec196767cdd1840389ea38450e18d54..754e689596a21b43f3b3a45b8f3062ec29b74099 100644 (file)
@@ -1118,7 +1118,9 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema  */
        case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
        case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+       case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+       case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
                return true;
        }
        return false;
@@ -1265,8 +1267,9 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
                if (fp->altsetting == 2)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
-       /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
-       case USB_ID(0x20b1, 0x2009):
+
+       case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
+       case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
                if (fp->altsetting == 3)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
index c5baf9c591b7bb5a2c280e173f4e0a2b561285fa..618c2bcd4eabc6143b0e7f0431f57b8620101fe5 100644 (file)
@@ -123,6 +123,8 @@ static int get_last_jit_image(char *haystack, size_t hlen,
        assert(ret == 0);
 
        ptr = haystack;
+       memset(pmatch, 0, sizeof(pmatch));
+
        while (1) {
                ret = regexec(&regex, ptr, 1, pmatch, 0);
                if (ret == 0) {
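
Without the memset, a failed regexec() leaves pmatch untouched, and any later arithmetic on rm_so/rm_eo reads indeterminate values. A minimal runnable pattern of the fixed loop, on a made-up input string:

#include <regex.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        regex_t re;
        regmatch_t pmatch[1];
        const char *ptr = "flen=8 proglen=54 pass=3 image=00000000a3ea";

        if (regcomp(&re, "image=[0-9a-f]+", REG_EXTENDED))
                return 1;

        /* deterministic offsets even if no match is ever found */
        memset(pmatch, 0, sizeof(pmatch));

        while (regexec(&re, ptr, 1, pmatch, 0) == 0) {
                printf("match at [%d,%d)\n",
                       (int)pmatch[0].rm_so, (int)pmatch[0].rm_eo);
                ptr += pmatch[0].rm_eo;
        }
        regfree(&re);
        return 0;
}
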
index bac98ca3d4ca7e4cd8efb36e79d550a43bb4d1d4..323b65edfc970b5ba5783de3c16f8b684728e47b 100644 (file)
@@ -52,6 +52,7 @@ unsigned int skip_c0;
 unsigned int skip_c1;
 unsigned int do_nhm_cstates;
 unsigned int do_snb_cstates;
+unsigned int do_knl_cstates;
 unsigned int do_pc2;
 unsigned int do_pc3;
 unsigned int do_pc6;
@@ -91,6 +92,7 @@ unsigned int do_gfx_perf_limit_reasons;
 unsigned int do_ring_perf_limit_reasons;
 unsigned int crystal_hz;
 unsigned long long tsc_hz;
+int base_cpu;
 
 #define RAPL_PKG               (1 << 0)
                                        /* 0x610 MSR_PKG_POWER_LIMIT */
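
The new base_cpu global replaces the hard-coded CPU 0 in the get_msr() calls below, so turbostat no longer assumes that /dev/cpu/0/msr exists (CPU 0 may be absent or offline). For reference, a sketch of reading one MSR through the msr driver, which is the mechanism turbostat's get_msr() wraps; the register address is simply the file offset:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Needs the msr driver loaded and read permission on /dev/cpu/<cpu>/msr. */
static int read_msr(int cpu, uint32_t reg, uint64_t *val)
{
        char path[32];
        int fd;

        snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;
        if (pread(fd, val, sizeof(*val), reg) != sizeof(*val)) {
                close(fd);
                return -1;
        }
        close(fd);
        return 0;
}
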
@@ -316,7 +318,7 @@ void print_header(void)
 
        if (do_nhm_cstates)
                outp += sprintf(outp, "  CPU%%c1");
-       if (do_nhm_cstates && !do_slm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
                outp += sprintf(outp, "  CPU%%c3");
        if (do_nhm_cstates)
                outp += sprintf(outp, "  CPU%%c6");
@@ -546,7 +548,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (do_nhm_cstates && !do_slm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
                outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc);
        if (do_nhm_cstates)
                outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc);
@@ -1018,14 +1020,17 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                return 0;
 
-       if (do_nhm_cstates && !do_slm_cstates) {
+       if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) {
                if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
                        return -6;
        }
 
-       if (do_nhm_cstates) {
+       if (do_nhm_cstates && !do_knl_cstates) {
                if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
                        return -7;
+       } else if (do_knl_cstates) {
+               if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
+                       return -7;
        }
 
        if (do_snb_cstates)
@@ -1150,7 +1155,7 @@ dump_nhm_platform_info(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(0, MSR_NHM_PLATFORM_INFO, &msr);
+       get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
 
        fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);
 
@@ -1162,7 +1167,7 @@ dump_nhm_platform_info(void)
        fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n",
                ratio, bclk, ratio * bclk);
 
-       get_msr(0, MSR_IA32_POWER_CTL, &msr);
+       get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
        fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
                msr, msr & 0x2 ? "EN" : "DIS");
 
@@ -1175,7 +1180,7 @@ dump_hsw_turbo_ratio_limits(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(0, MSR_TURBO_RATIO_LIMIT2, &msr);
+       get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
 
        fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr);
 
@@ -1197,7 +1202,7 @@ dump_ivt_turbo_ratio_limits(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(0, MSR_TURBO_RATIO_LIMIT1, &msr);
+       get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
 
        fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr);
 
@@ -1249,7 +1254,7 @@ dump_nhm_turbo_ratio_limits(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(0, MSR_TURBO_RATIO_LIMIT, &msr);
+       get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
 
        fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
 
@@ -1295,12 +1300,73 @@ dump_nhm_turbo_ratio_limits(void)
        return;
 }
 
+static void
+dump_knl_turbo_ratio_limits(void)
+{
+       int cores;
+       unsigned int ratio;
+       unsigned long long msr;
+       int delta_cores;
+       int delta_ratio;
+       int i;
+
+       get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
+
+       fprintf(stderr, "cpu%d: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n",
+               base_cpu, msr);
+
+       /*
+        * Turbo encoding in KNL is as follows:
+        * [7:0]   -- Base value of number of active cores of bucket 1.
+        * [15:8]  -- Base value of freq ratio of bucket 1.
+        * [20:16] -- +ve delta of number of active cores of bucket 2.
+        *            i.e. active cores of bucket 2 =
+        *            active cores of bucket 1 + delta
+        * [23:21] -- -ve delta of freq ratio of bucket 2.
+        *            i.e. freq ratio of bucket 2 =
+        *            freq ratio of bucket 1 - delta
+        * [28:24] -- +ve delta of number of active cores of bucket 3.
+        * [31:29] -- -ve delta of freq ratio of bucket 3.
+        * [36:32] -- +ve delta of number of active cores of bucket 4.
+        * [39:37] -- -ve delta of freq ratio of bucket 4.
+        * [44:40] -- +ve delta of number of active cores of bucket 5.
+        * [47:45] -- -ve delta of freq ratio of bucket 5.
+        * [52:48] -- +ve delta of number of active cores of bucket 6.
+        * [55:53] -- -ve delta of freq ratio of bucket 6.
+        * [60:56] -- +ve delta of number of active cores of bucket 7.
+        * [63:61] -- -ve delta of freq ratio of bucket 7.
+        */
+       cores = msr & 0xFF;
+       ratio = (msr >> 8) & 0xFF;      /* bitwise mask, not logical && */
+       if (ratio > 0)
+               fprintf(stderr,
+                       "%d * %.0f = %.0f MHz max turbo %d active cores\n",
+                       ratio, bclk, ratio * bclk, cores);
+
+       for (i = 16; i < 64; i = i + 8) {
+               delta_cores = (msr >> i) & 0x1F;
+               delta_ratio = (msr >> (i + 5)) & 0x7;   /* bitwise mask */
+               if (!delta_cores || !delta_ratio)
+                       return;
+               cores = cores + delta_cores;
+               ratio = ratio - delta_ratio;
+
+               /* A negative delta can drive the computed ratio to zero;
+                * only print buckets while the ratio remains positive.
+                */
+               if (ratio > 0)
+                       fprintf(stderr,
+                               "%d * %.0f = %.0f MHz max turbo %d active cores\n",
+                               ratio, bclk, ratio * bclk, cores);
+       }
+}
+
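
The bucket walk above, extracted into a runnable userspace demo with the masks written as bitwise &, matching the corrected lines; the sample MSR value is invented for illustration:

#include <stdio.h>

int main(void)
{
        unsigned long long msr = 0x2c2c2c2c2c2c1820ULL; /* hypothetical */
        int cores = msr & 0xFF;
        int ratio = (msr >> 8) & 0xFF;
        int i;

        printf("bucket 1: ratio %d, %d active cores\n", ratio, cores);
        for (i = 16; i < 64; i += 8) {
                int delta_cores = (msr >> i) & 0x1F;
                int delta_ratio = (msr >> (i + 5)) & 0x7;

                if (!delta_cores || !delta_ratio)
                        break;
                cores += delta_cores;
                ratio -= delta_ratio;
                if (ratio > 0)
                        printf("bucket %d: ratio %d, %d active cores\n",
                               i / 8, ratio, cores);
        }
        return 0;
}
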
 static void
 dump_nhm_cst_cfg(void)
 {
        unsigned long long msr;
 
-       get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
+       get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
 
 #define SNB_C1_AUTO_UNDEMOTE              (1UL << 27)
 #define SNB_C3_AUTO_UNDEMOTE              (1UL << 28)
@@ -1381,12 +1447,41 @@ int parse_int_file(const char *fmt, ...)
 }
 
 /*
- * cpu_is_first_sibling_in_core(cpu)
- * return 1 if given CPU is 1st HT sibling in the core
+ * get_cpu_position_in_core(cpu)
+ * return the position of the CPU among its HT siblings in the core
+ * return -1 if the CPU is not in the list
  */
-int cpu_is_first_sibling_in_core(int cpu)
+int get_cpu_position_in_core(int cpu)
 {
-       return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
+       char path[64];
+       FILE *filep;
+       int this_cpu;
+       char character;
+       int i;
+
+       sprintf(path,
+               "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list",
+               cpu);
+       filep = fopen(path, "r");
+       if (filep == NULL) {
+               perror(path);
+               exit(1);
+       }
+
+       for (i = 0; i < topo.num_threads_per_core; i++) {
+               fscanf(filep, "%d", &this_cpu);
+               if (this_cpu == cpu) {
+                       fclose(filep);
+                       return i;
+               }
+
+               /* Account for no separator after the last thread */
+               if (i != (topo.num_threads_per_core - 1))
+                       fscanf(filep, "%c", &character);
+       }
+
+       fclose(filep);
+       return -1;
 }
 
 /*
@@ -1412,25 +1507,31 @@ int get_num_ht_siblings(int cpu)
 {
        char path[80];
        FILE *filep;
-       int sib1, sib2;
-       int matches;
+       int sib1;
+       int matches = 0;
        char character;
+       char str[100];
+       char *ch;
 
        sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
        filep = fopen_or_die(path, "r");
+
        /*
         * file format:
-        * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4)
-        * otherwinse 1 sibling (self).
+        * a ','-separated or '-'-separated list of CPU numbers
+        * (e.g. 1-2 or 1,3,4,5)
         */
-       matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
+       fscanf(filep, "%d%c\n", &sib1, &character);
+       fseek(filep, 0, SEEK_SET);
+       fgets(str, 100, filep);
+       ch = strchr(str, character);
+       while (ch != NULL) {
+               matches++;
+               ch = strchr(ch+1, character);
+       }
 
        fclose(filep);
-
-       if (matches == 3)
-               return 2;
-       else
-               return 1;
+       return matches+1;
 }
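
Both rewritten helpers parse /sys/devices/system/cpu/cpuN/topology/thread_siblings_list, which holds a comma- or dash-separated CPU list such as "0,4". A simplified, runnable variant of the separator-counting idea, accepting either separator on an in-memory string (the sample lists are illustrative):

#include <stdio.h>

/* One more sibling than there are separator characters. */
static int count_siblings(const char *list)
{
        int n = 1;
        const char *p;

        for (p = list; *p; p++)
                if (*p == ',' || *p == '-')
                        n++;
        return n;
}

int main(void)
{
        printf("%d\n", count_siblings("0,4"));     /* 2 */
        printf("%d\n", count_siblings("1,3,4,5")); /* 4 */
        return 0;
}
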
 
 /*
@@ -1594,8 +1695,10 @@ restart:
 void check_dev_msr()
 {
        struct stat sb;
+       char pathname[32];
 
-       if (stat("/dev/cpu/0/msr", &sb))
+       sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
+       if (stat(pathname, &sb))
                if (system("/sbin/modprobe msr > /dev/null 2>&1"))
-                       err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
+                       err(-5, "no %s, Try \"# modprobe msr\" ", pathname);
 }
@@ -1608,6 +1711,7 @@ void check_permissions()
        cap_user_data_t cap_data = &cap_data_data;
        extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
        int do_exit = 0;
+       char pathname[32];
 
        /* check for CAP_SYS_RAWIO */
        cap_header->pid = getpid();
@@ -1622,7 +1726,8 @@ void check_permissions()
        }
 
        /* test file permissions */
-       if (euidaccess("/dev/cpu/0/msr", R_OK)) {
+       sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
+       if (euidaccess(pathname, R_OK)) {
                do_exit++;
                warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
        }
@@ -1704,7 +1809,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
        default:
                return 0;
        }
-       get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
+       get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
 
        pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
 
@@ -1753,6 +1858,21 @@ int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
        }
 }
 
+int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+
+       if (family != 6)
+               return 0;
+
+       switch (model) {
+       case 0x57:      /* Knights Landing */
+               return 1;
+       default:
+               return 0;
+       }
+}
+
 static void
 dump_cstate_pstate_config_info(family, model)
 {
@@ -1770,6 +1890,9 @@ dump_cstate_pstate_config_info(family, model)
        if (has_nhm_turbo_ratio_limit(family, model))
                dump_nhm_turbo_ratio_limits();
 
+       if (has_knl_turbo_ratio_limit(family, model))
+               dump_knl_turbo_ratio_limits();
+
        dump_nhm_cst_cfg();
 }
 
@@ -1801,7 +1924,7 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
                return 0;
 
-       switch (msr & 0x7) {
+       switch (msr & 0xF) {
        case ENERGY_PERF_BIAS_PERFORMANCE:
                epb_string = "performance";
                break;
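
The mask widens from 0x7 to 0xF because IA32_ENERGY_PERF_BIAS is a four-bit field; with a three-bit mask the powersave setting (15) aliased onto 7 and was reported as "custom". A sketch of the decode using the named points turbostat relies on:

#include <stdio.h>

/* IA32_ENERGY_PERF_BIAS, bits 3:0 */
#define ENERGY_PERF_BIAS_PERFORMANCE    0
#define ENERGY_PERF_BIAS_NORMAL         6
#define ENERGY_PERF_BIAS_POWERSAVE      15

static const char *epb_string(unsigned long long msr)
{
        switch (msr & 0xF) {
        case ENERGY_PERF_BIAS_PERFORMANCE: return "performance";
        case ENERGY_PERF_BIAS_NORMAL:      return "balanced";
        case ENERGY_PERF_BIAS_POWERSAVE:   return "powersave";
        default:                           return "custom";
        }
}

int main(void)
{
        printf("%s\n", epb_string(15)); /* powersave; 15 & 0x7 misread it as 7 */
        return 0;
}
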
@@ -1925,7 +2048,7 @@ double get_tdp(model)
        unsigned long long msr;
 
        if (do_rapl & RAPL_PKG_POWER_INFO)
-               if (!get_msr(0, MSR_PKG_POWER_INFO, &msr))
+               if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
                        return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
 
        switch (model) {
@@ -1950,6 +2073,7 @@ rapl_dram_energy_units_probe(int  model, double rapl_energy_units)
        case 0x3F:      /* HSX */
        case 0x4F:      /* BDX */
        case 0x56:      /* BDX-DE */
+       case 0x57:      /* KNL */
                return (rapl_dram_energy_units = 15.3 / 1000000);
        default:
                return (rapl_energy_units);
@@ -1991,6 +2115,7 @@ void rapl_probe(unsigned int family, unsigned int model)
        case 0x3F:      /* HSX */
        case 0x4F:      /* BDX */
        case 0x56:      /* BDX-DE */
+       case 0x57:      /* KNL */
                do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
                break;
        case 0x2D:
@@ -2006,7 +2131,7 @@ void rapl_probe(unsigned int family, unsigned int model)
        }
 
        /* units on package 0, verify later other packages match */
-       if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
+       if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
                return;
 
        rapl_power_units = 1.0 / (1 << (msr & 0xF));
@@ -2331,6 +2456,17 @@ int is_slm(unsigned int family, unsigned int model)
        return 0;
 }
 
+int is_knl(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+       switch (model) {
+       case 0x57:      /* KNL */
+               return 1;
+       }
+       return 0;
+}
+
 #define SLM_BCLK_FREQS 5
 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
 
@@ -2340,7 +2476,7 @@ double slm_bclk(void)
        unsigned int i;
        double freq;
 
-       if (get_msr(0, MSR_FSB_FREQ, &msr))
+       if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
                fprintf(stderr, "SLM BCLK: unknown\n");
 
        i = msr & 0xf;
@@ -2408,7 +2544,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
        if (!do_nhm_platform_info)
                goto guess;
 
-       if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr))
+       if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
                goto guess;
 
        target_c_local = (msr >> 16) & 0xFF;
@@ -2541,6 +2677,7 @@ void process_cpuid()
        do_c8_c9_c10 = has_hsw_msrs(family, model);
        do_skl_residency = has_skl_msrs(family, model);
        do_slm_cstates = is_slm(family, model);
+       do_knl_cstates  = is_knl(family, model);
        bclk = discover_bclk(family, model);
 
        rapl_probe(family, model);
@@ -2755,13 +2892,9 @@ int initialize_counters(int cpu_id)
 
        my_package_id = get_physical_package_id(cpu_id);
        my_core_id = get_core_id(cpu_id);
-
-       if (cpu_is_first_sibling_in_core(cpu_id)) {
-               my_thread_id = 0;
+       my_thread_id = get_cpu_position_in_core(cpu_id);
+       if (!my_thread_id)
                topo.num_cores++;
-       } else {
-               my_thread_id = 1;
-       }
 
        init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
        init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
@@ -2785,13 +2918,24 @@ void setup_all_buffers(void)
        for_all_proc_cpus(initialize_counters);
 }
 
+void set_base_cpu(void)
+{
+       base_cpu = sched_getcpu();
+       if (base_cpu < 0)
+               err(-ENODEV, "No valid cpus found");
+
+       if (debug > 1)
+               fprintf(stderr, "base_cpu = %d\n", base_cpu);
+}
+
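
set_base_cpu() relies on sched_getcpu(), so whichever CPU turbostat happens to be running on becomes the MSR reference point; unlike a hard-coded CPU 0, it is online by construction. Minimal usage:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        int cpu = sched_getcpu();   /* CPU the caller currently runs on */

        if (cpu < 0) {
                perror("sched_getcpu");
                return 1;
        }
        printf("running on cpu %d\n", cpu);
        return 0;
}
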
 void turbostat_init()
 {
+       setup_all_buffers();
+       set_base_cpu();
        check_dev_msr();
        check_permissions();
        process_cpuid();
 
-       setup_all_buffers();
 
        if (debug)
                for_all_cpus(print_epb, ODD_COUNTERS);
@@ -2870,7 +3014,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(stderr, "turbostat version 4.5 2 Apr, 2015"
+       fprintf(stderr, "turbostat version 4.7 27-May, 2015"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
index 5bdb781163d1f2d0eb6a69fea8a976fa2873f3b1..9b0d8baf2934ed30acb88df525275aa2da64f275 100644 (file)
@@ -5,8 +5,10 @@ include ../lib.mk
 .PHONY: all all_32 all_64 warn_32bit_failure clean
 
 TARGETS_C_BOTHBITS := sigreturn single_step_syscall
+TARGETS_C_32BIT_ONLY := entry_from_vm86
 
-BINARIES_32 := $(TARGETS_C_BOTHBITS:%=%_32)
+TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
+BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
 BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64)
 
 CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
@@ -32,7 +34,7 @@ all_64: $(BINARIES_64)
 clean:
        $(RM) $(BINARIES_32) $(BINARIES_64)
 
-$(TARGETS_C_BOTHBITS:%=%_32): %_32: %.c
+$(TARGETS_C_32BIT_ALL:%=%_32): %_32: %.c
        $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
 
 $(TARGETS_C_BOTHBITS:%=%_64): %_64: %.c
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
new file mode 100644 (file)
index 0000000..5c38a18
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * entry_from_vm86.c - tests kernel entries from vm86 mode
+ * Copyright (c) 2014-2015 Andrew Lutomirski
+ *
+ * This exercises a few paths that need to special-case vm86 mode.
+ *
+ * GPL v2.
+ */
+
+#define _GNU_SOURCE
+
+#include <assert.h>
+#include <stdlib.h>
+#include <sys/syscall.h>
+#include <sys/signal.h>
+#include <sys/ucontext.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <err.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <sys/vm86.h>
+
+static unsigned long load_addr = 0x10000;
+static int nerrs = 0;
+
+asm (
+       ".pushsection .rodata\n\t"
+       ".type vmcode_bound, @object\n\t"
+       "vmcode:\n\t"
+       "vmcode_bound:\n\t"
+       ".code16\n\t"
+       "bound %ax, (2048)\n\t"
+       "int3\n\t"
+       "vmcode_sysenter:\n\t"
+       "sysenter\n\t"
+       ".size vmcode, . - vmcode\n\t"
+       "end_vmcode:\n\t"
+       ".code32\n\t"
+       ".popsection"
+       );
+
+extern unsigned char vmcode[], end_vmcode[];
+extern unsigned char vmcode_bound[], vmcode_sysenter[];
+
+static void do_test(struct vm86plus_struct *v86, unsigned long eip,
+                   const char *text)
+{
+       long ret;
+
+       printf("[RUN]\t%s from vm86 mode\n", text);
+       v86->regs.eip = eip;
+       ret = vm86(VM86_ENTER, v86);
+
+       if (ret == -1 && errno == ENOSYS) {
+               printf("[SKIP]\tvm86 not supported\n");
+               return;
+       }
+
+       if (VM86_TYPE(ret) == VM86_INTx) {
+               char trapname[32];
+               int trapno = VM86_ARG(ret);
+               if (trapno == 13)
+                       strcpy(trapname, "GP");
+               else if (trapno == 5)
+                       strcpy(trapname, "BR");
+               else if (trapno == 14)
+                       strcpy(trapname, "PF");
+               else
+                       sprintf(trapname, "%d", trapno);
+
+               printf("[OK]\tExited vm86 mode due to #%s\n", trapname);
+       } else if (VM86_TYPE(ret) == VM86_UNKNOWN) {
+               printf("[OK]\tExited vm86 mode due to unhandled GP fault\n");
+       } else {
+               printf("[OK]\tExited vm86 mode due to type %ld, arg %ld\n",
+                      VM86_TYPE(ret), VM86_ARG(ret));
+       }
+}
+
+int main(void)
+{
+       struct vm86plus_struct v86;
+       unsigned char *addr = mmap((void *)load_addr, 4096,
+                                  PROT_READ | PROT_WRITE | PROT_EXEC,
+                                  MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+       if (addr != (unsigned char *)load_addr)
+               err(1, "mmap");
+
+       memcpy(addr, vmcode, end_vmcode - vmcode);
+       addr[2048] = 2;
+       addr[2050] = 3;
+
+       memset(&v86, 0, sizeof(v86));
+
+       v86.regs.cs = load_addr / 16;
+       v86.regs.ss = load_addr / 16;
+       v86.regs.ds = load_addr / 16;
+       v86.regs.es = load_addr / 16;
+
+       assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */
+
+       /* #BR -- should deliver SIG??? */
+       do_test(&v86, vmcode_bound - vmcode, "#BR");
+
+       /* SYSENTER -- should cause #GP or #UD depending on CPU */
+       do_test(&v86, vmcode_sysenter - vmcode, "SYSENTER");
+
+       return (nerrs == 0 ? 0 : 1);
+}