Merge branch '4.0-fixes' into mips-for-linux-next
author     Ralf Baechle <ralf@linux-mips.org>
           Tue, 7 Apr 2015 23:17:03 +0000 (01:17 +0200)
committer  Ralf Baechle <ralf@linux-mips.org>
           Tue, 7 Apr 2015 23:17:03 +0000 (01:17 +0200)
323 files changed:
Documentation/input/event-codes.txt
Documentation/input/multi-touch-protocol.txt
MAINTAINERS
Makefile
arch/arc/kernel/signal.c
arch/arm/Kconfig
arch/arm/boot/dts/dm8168-evm.dts
arch/arm/boot/dts/dm816x.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/socfpga.dtsi
arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/mach-omap2/id.c
arch/arm/mach-pxa/irq.c
arch/arm/mach-pxa/zeus.c
arch/arm/mach-sunxi/Kconfig
arch/arm/plat-omap/dmtimer.c
arch/arm64/boot/dts/arm/juno-clocks.dtsi
arch/arm64/include/asm/cmpxchg.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/percpu.h
arch/metag/include/asm/io.h
arch/metag/include/asm/pgtable-bits.h [new file with mode: 0644]
arch/metag/include/asm/pgtable.h
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/bcm47xx/board.c
arch/mips/bcm63xx/prom.c
arch/mips/bcm63xx/setup.c
arch/mips/cavium-octeon/dma-octeon.c
arch/mips/include/asm/asm-eva.h
arch/mips/include/asm/cacheflush.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/elf.h
arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
arch/mips/include/asm/octeon/pci-octeon.h
arch/mips/include/asm/pgtable-32.h
arch/mips/include/asm/pgtable-bits.h
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/spinlock.h
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/entry.S
arch/mips/kernel/proc.c
arch/mips/kernel/smp-cps.c
arch/mips/kernel/unaligned.c
arch/mips/loongson/loongson-3/irq.c
arch/mips/mm/cache.c
arch/mips/mm/init.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/mti-malta/malta-memory.c
arch/mips/netlogic/xlp/ahci-init-xlp2.c
arch/mips/pci/Makefile
arch/mips/pci/pci-octeon.c
arch/mips/pci/pcie-octeon.c
arch/mips/ralink/Kconfig
arch/parisc/include/asm/pgalloc.h
arch/parisc/kernel/syscall_table.S
arch/powerpc/include/asm/cputhreads.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/reg.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/dbell.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/pseries/mobility.c
arch/s390/include/asm/elf.h
arch/s390/kernel/ftrace.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/swsusp_asm64.S
arch/sparc/include/asm/hypervisor.h
arch/sparc/kernel/hvapi.c
arch/sparc/kernel/hvcalls.S
arch/sparc/kernel/pcr.c
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/process_64.c
arch/sparc/lib/memmove.S
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/kgdb.c
arch/x86/kernel/reboot.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/lapic.c
arch/x86/kvm/vmx.c
arch/x86/xen/p2m.c
block/blk-merge.c
block/blk-mq-tag.c
block/blk-mq.c
block/blk-settings.c
drivers/ata/libata-core.c
drivers/base/regmap/internal.h
drivers/base/regmap/regcache.c
drivers/base/regmap/regmap.c
drivers/block/nbd.c
drivers/block/nvme-core.c
drivers/clocksource/Kconfig
drivers/clocksource/timer-sun5i.c
drivers/dma/bcm2835-dma.c
drivers/dma/dma-jz4740.c
drivers/dma/edma.c
drivers/dma/moxart-dma.c
drivers/dma/omap-dma.c
drivers/firmware/dmi_scan.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-syscon.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_edid_load.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_mn.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/vce_v2_0.c
drivers/iio/accel/bma180.c
drivers/iio/accel/bmc150-accel.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/adc/Kconfig
drivers/iio/adc/at91_adc.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/adc/vf610_adc.c
drivers/iio/gyro/bmg160.c
drivers/iio/imu/adis_trigger.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
drivers/iio/imu/kmx61.c
drivers/iio/industrialio-core.c
drivers/iio/industrialio-event.c
drivers/iio/proximity/sx9500.c
drivers/infiniband/core/umem.c
drivers/input/mouse/alps.c
drivers/input/mouse/synaptics.c
drivers/iommu/arm-smmu.c
drivers/iommu/intel-iommu.c
drivers/iommu/ipmmu-vmsa.c
drivers/irqchip/irq-gic-v3-its.c
drivers/lguest/Kconfig
drivers/md/dm.c
drivers/mfd/kempld-core.c
drivers/mfd/rtsx_usb.c
drivers/net/bonding/bond_main.c
drivers/net/can/flexcan.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/usb/peak_usb/pcan_ucan.h
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ipvlan/ipvlan.h
drivers/net/ipvlan/ipvlan_core.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/usb/asix_common.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/cx82310_eth.c
drivers/net/usb/r8152.c
drivers/net/usb/sr9800.c
drivers/net/usb/usbnet.c
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/common.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/brcm80211/brcmfmac/feature.c
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/rtlwifi/pci.c
drivers/of/address.c
drivers/regulator/palmas-regulator.c
drivers/rtc/rtc-mrst.c
drivers/scsi/ipr.c
drivers/scsi/libsas/sas_ata.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-qup.c
drivers/spi/spi.c
drivers/ssb/Kconfig
drivers/staging/iio/Kconfig
drivers/staging/iio/magnetometer/hmc5843_core.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/samsung.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/isp1760/isp1760-udc.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/keyspan_pda.c
drivers/watchdog/imgpdc_wdt.c
drivers/watchdog/mtk_wdt.c
drivers/xen/Kconfig
drivers/xen/balloon.c
fs/affs/file.c
fs/cifs/cifsencrypt.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/fs-writeback.c
fs/hfsplus/brec.c
fs/locks.c
fs/nfsd/blocklayout.c
fs/nfsd/blocklayoutxdr.c
fs/nfsd/nfs4layouts.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfscache.c
include/linux/fs.h
include/linux/irqchip/arm-gic-v3.h
include/linux/lcm.h
include/linux/libata.h
include/linux/mfd/palmas.h
include/linux/regulator/driver.h
include/linux/sched.h
include/linux/sunrpc/debug.h
include/linux/usb/usbnet.h
include/linux/writeback.h
include/net/netfilter/nf_log.h
include/trace/events/regmap.h
include/uapi/linux/input.h
include/uapi/linux/nfsd/export.h
kernel/events/core.c
kernel/locking/lockdep.c
kernel/module.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sysctl.c
kernel/time/tick-broadcast-hrtimer.c
lib/lcm.c
lib/nlattr.c
mm/huge_memory.c
mm/memory.c
mm/memory_hotplug.c
mm/mmap.c
mm/mprotect.c
mm/page-writeback.c
mm/page_isolation.c
mm/pagewalk.c
mm/rmap.c
mm/slub.c
net/compat.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/ipv4/ipmr.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv6/fib6_rules.c
net/ipv6/ip6mr.c
net/ipv6/ndisc.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp_offload.c
net/iucv/af_iucv.c
net/mac80211/agg-rx.c
net/mac80211/rx.c
net/mac80211/sta_info.h
net/netfilter/nf_log.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nft_compat.c
net/netfilter/nft_hash.c
net/netfilter/xt_TPROXY.c
net/openvswitch/vport.c
net/socket.c
net/sunrpc/clnt.c
net/sunrpc/debugfs.c
net/sunrpc/sunrpc_syms.c
net/sunrpc/xprt.c
net/tipc/core.c
security/selinux/selinuxfs.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
tools/testing/selftests/Makefile
virt/kvm/kvm_main.c

index c587a966413e8597da3241f851db92f6e6df1d81..96705616f5820a6d48d6cfda497c7fcf30917e2a 100644 (file)
@@ -294,6 +294,12 @@ accordingly. This property does not affect kernel behavior.
 The kernel does not provide button emulation for such devices but treats
 them as any other INPUT_PROP_BUTTONPAD device.
 
+INPUT_PROP_ACCELEROMETER
+-------------------------
+Directional axes on this device (absolute and/or relative x, y, z) represent
+accelerometer data. All other axes retain their meaning. A device must not mix
+regular directional axes and accelerometer axes on the same event node.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
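
(Illustrative sketch, not part of this commit: one way a driver might advertise the new property. The device name, axis ranges and helper function are hypothetical; input_allocate_device(), input_set_abs_params(), input_register_device() and the INPUT_PROP_ACCELEROMETER propbit are the existing input-core interfaces the text above refers to.)

#include <linux/input.h>

static int example_accel_register(struct device *parent)
{
	struct input_dev *dev;
	int error;

	dev = input_allocate_device();
	if (!dev)
		return -ENOMEM;

	dev->name = "example-accelerometer";	/* hypothetical device */
	dev->dev.parent = parent;

	/* All directional axes carry accelerometer data; no positional axes mixed in. */
	__set_bit(INPUT_PROP_ACCELEROMETER, dev->propbit);
	input_set_abs_params(dev, ABS_X, -512, 511, 4, 0);
	input_set_abs_params(dev, ABS_Y, -512, 511, 4, 0);
	input_set_abs_params(dev, ABS_Z, -512, 511, 4, 0);

	error = input_register_device(dev);
	if (error)
		input_free_device(dev);
	return error;
}
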
index 7b4f59c09ee2301d077446f5f9ce9c2b96aa2551..b85d000faeb4067c9ab1ed06690105d459a40a4e 100644 (file)
@@ -312,9 +312,12 @@ ABS_MT_TOOL_TYPE
 
 The type of approaching tool. A lot of kernel drivers cannot distinguish
 between different tool types, such as a finger or a pen. In such cases, the
-event should be omitted. The protocol currently supports MT_TOOL_FINGER and
-MT_TOOL_PEN [2]. For type B devices, this event is handled by input core;
-drivers should instead use input_mt_report_slot_state().
+event should be omitted. The protocol currently supports MT_TOOL_FINGER,
+MT_TOOL_PEN, and MT_TOOL_PALM [2]. For type B devices, this event is handled
+by input core; drivers should instead use input_mt_report_slot_state().
+A contact's ABS_MT_TOOL_TYPE may change over time while still touching the
+device, because the firmware may not be able to determine which tool is being
+used when it first appears.
 
 ABS_MT_BLOB_ID
 
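(Illustrative sketch, not part of this commit: how a type B driver might report a palm contact with the helper named above. The function, slot handling and coordinates are hypothetical; only input_mt_slot(), input_mt_report_slot_state() and the MT_TOOL_* constants come from the MT protocol described here.)

#include <linux/input/mt.h>

static void example_report_contact(struct input_dev *dev, int slot,
				   bool active, bool is_palm, int x, int y)
{
	input_mt_slot(dev, slot);
	/* Type B: the input core emits ABS_MT_TOOL_TYPE from this call. */
	input_mt_report_slot_state(dev,
				   is_palm ? MT_TOOL_PALM : MT_TOOL_FINGER,
				   active);
	if (active) {
		input_report_abs(dev, ABS_MT_POSITION_X, x);
		input_report_abs(dev, ABS_MT_POSITION_Y, y);
	}
}
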
index 358eb0105e008d4b89b0f485141f8b676884fdb3..efbcb50e496954934295b3147e1de9dbdd05ff9e 100644 (file)
@@ -637,8 +637,7 @@ F:      drivers/gpu/drm/radeon/radeon_kfd.h
 F:      include/uapi/linux/kfd_ioctl.h
 
 AMD MICROCODE UPDATE SUPPORT
-M:     Andreas Herrmann <herrmann.der.user@googlemail.com>
-L:     amd64-microcode@amd64.org
+M:     Borislav Petkov <bp@alien8.de>
 S:     Maintained
 F:     arch/x86/kernel/cpu/microcode/amd*
 
@@ -1186,7 +1185,7 @@ M:        Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-mvebu/
-F:     drivers/rtc/armada38x-rtc
+F:     drivers/rtc/rtc-armada38x.c
 
 ARM/Marvell Berlin SoC support
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -1362,6 +1361,7 @@ F:        drivers/i2c/busses/i2c-rk3x.c
 F:     drivers/*/*rockchip*
 F:     drivers/*/*/*rockchip*
 F:     sound/soc/rockchip/
+N:     rockchip
 
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene@kernel.org>
@@ -1675,8 +1675,8 @@ F:        drivers/misc/eeprom/at24.c
 F:     include/linux/platform_data/at24.h
 
 ATA OVER ETHERNET (AOE) DRIVER
-M:     "Ed L. Cashin" <ecashin@coraid.com>
-W:     http://support.coraid.com/support/linux
+M:     "Ed L. Cashin" <ed.cashin@acm.org>
+W:     http://www.openaoe.org/
 S:     Supported
 F:     Documentation/aoe/
 F:     drivers/block/aoe/
@@ -3252,6 +3252,13 @@ S:       Maintained
 F:     Documentation/hwmon/dme1737
 F:     drivers/hwmon/dme1737.c
 
+DMI/SMBIOS SUPPORT
+M:     Jean Delvare <jdelvare@suse.de>
+S:     Maintained
+F:     drivers/firmware/dmi-id.c
+F:     drivers/firmware/dmi_scan.c
+F:     include/linux/dmi.h
+
 DOCKING STATION DRIVER
 M:     Shaohua Li <shaohua.li@intel.com>
 L:     linux-acpi@vger.kernel.org
@@ -5087,7 +5094,7 @@ S:        Supported
 F:     drivers/platform/x86/intel_menlow.c
 
 INTEL IA32 MICROCODE UPDATE SUPPORT
-M:     Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+M:     Borislav Petkov <bp@alien8.de>
 S:     Maintained
 F:     arch/x86/kernel/cpu/microcode/core*
 F:     arch/x86/kernel/cpu/microcode/intel*
@@ -5128,22 +5135,21 @@ M:      Deepak Saxena <dsaxena@plexity.net>
 S:     Maintained
 F:     drivers/char/hw_random/ixp4xx-rng.c
 
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
+INTEL ETHERNET DRIVERS
 M:     Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-M:     Jesse Brandeburg <jesse.brandeburg@intel.com>
-M:     Bruce Allan <bruce.w.allan@intel.com>
-M:     Carolyn Wyborny <carolyn.wyborny@intel.com>
-M:     Don Skidmore <donald.c.skidmore@intel.com>
-M:     Greg Rose <gregory.v.rose@intel.com>
-M:     Matthew Vick <matthew.vick@intel.com>
-M:     John Ronciak <john.ronciak@intel.com>
-M:     Mitch Williams <mitch.a.williams@intel.com>
-M:     Linux NICS <linux.nics@intel.com>
-L:     e1000-devel@lists.sourceforge.net
+R:     Jesse Brandeburg <jesse.brandeburg@intel.com>
+R:     Shannon Nelson <shannon.nelson@intel.com>
+R:     Carolyn Wyborny <carolyn.wyborny@intel.com>
+R:     Don Skidmore <donald.c.skidmore@intel.com>
+R:     Matthew Vick <matthew.vick@intel.com>
+R:     John Ronciak <john.ronciak@intel.com>
+R:     Mitch Williams <mitch.a.williams@intel.com>
+L:     intel-wired-lan@lists.osuosl.org
 W:     http://www.intel.com/support/feedback.htm
 W:     http://e1000.sourceforge.net/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
+Q:     http://patchwork.ozlabs.org/project/intel-wired-lan/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
 S:     Supported
 F:     Documentation/networking/e100.txt
 F:     Documentation/networking/e1000.txt
index 14c722f9687766d5f672c74814c7c4298ea85110..da36a3be7969049870d969ae4a623fd7a0c15ee1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
index 114234e83caa25968e08302b5e61f14e152e2d16..edda76fae83f25219bd74c5487a458e29c765373 100644 (file)
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
               sigset_t *set)
 {
        int err;
-       err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+       err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
                             sizeof(sf->uc.uc_mcontext.regs.scratch));
        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
 
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
        if (!err)
                set_current_blocked(&set);
 
-       err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+       err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
                                sizeof(sf->uc.uc_mcontext.regs.scratch));
 
        return err;
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
        /* Don't restart from sigreturn */
        syscall_wont_restart(regs);
 
+       /*
+        * Ensure that sigreturn always returns to user mode (in case the
+        * regs saved on user stack got fudged between save and sigreturn)
+        * Otherwise it is easy to panic the kernel with a custom
+        * signal handler and/or restorer which clobbers the status32/ret
+        * to return to a bogus location in kernel mode.
+        */
+       regs->status32 |= STATUS_U_MASK;
+
        return regs->r0;
 
 badframe:
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 
        /*
         * handler returns using sigreturn stub provided already by userpsace
+        * If not, nuke the process right away
         */
-       BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
+       if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
+               return 1;
+
        regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
 
        /* User Stack for signal handler will be above the frame just carved */
@@ -296,12 +308,12 @@ static void
 handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
        sigset_t *oldset = sigmask_to_save();
-       int ret;
+       int failed;
 
        /* Set up the stack frame */
-       ret = setup_rt_frame(ksig, oldset, regs);
+       failed = setup_rt_frame(ksig, oldset, regs);
 
-       signal_setup_done(ret, ksig, 0);
+       signal_setup_done(failed, ksig, 0);
 }
 
 void do_signal(struct pt_regs *regs)
index 9f1f09a2bc9bf28894777c08d6968d7faeba55a6..cf4c0c99aa253f3f69ecc08a07f0c5a695e63640 100644 (file)
@@ -619,6 +619,7 @@ config ARCH_PXA
        select GENERIC_CLOCKEVENTS
        select GPIO_PXA
        select HAVE_IDE
+       select IRQ_DOMAIN
        select MULTI_IRQ_HANDLER
        select PLAT_PXA
        select SPARSE_IRQ
index d3a29c1b841727f58200a37ac14b03e96a18cdd2..afe678f6d2e950308b74e403d26a3cc326c2b760 100644 (file)
                >;
        };
 
+       mmc_pins: pinmux_mmc_pins {
+               pinctrl-single,pins = <
+                       DM816X_IOPAD(0x0a70, MUX_MODE0)                 /* SD_POW */
+                       DM816X_IOPAD(0x0a74, MUX_MODE0)                 /* SD_CLK */
+                       DM816X_IOPAD(0x0a78, MUX_MODE0)                 /* SD_CMD */
+                       DM816X_IOPAD(0x0a7C, MUX_MODE0)                 /* SD_DAT0 */
+                       DM816X_IOPAD(0x0a80, MUX_MODE0)                 /* SD_DAT1 */
+                       DM816X_IOPAD(0x0a84, MUX_MODE0)                 /* SD_DAT2 */
+                       DM816X_IOPAD(0x0a88, MUX_MODE0)                 /* SD_DAT3 */
+                       DM816X_IOPAD(0x0a8c, MUX_MODE2)                 /* GP1[7] */
+                       DM816X_IOPAD(0x0a90, MUX_MODE2)                 /* GP1[8] */
+               >;
+       };
+
        usb0_pins: pinmux_usb0_pins {
                pinctrl-single,pins = <
                        DM816X_IOPAD(0x0d00, MUX_MODE0)                 /* USB0_DRVVBUS */
 };
 
 &mmc1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&mmc_pins>;
        vmmc-supply = <&vmmcsd_fixed>;
+       bus-width = <4>;
+       cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
 };
 
 /* At least dm8168-evm rev c won't support multipoint, later may */
index 3c97b5f2addc12a86639ec7b8e63e92349831236..f35715bc69922591577299730ecf1a105c6642de 100644 (file)
                };
 
                gpio1: gpio@48032000 {
-                       compatible = "ti,omap3-gpio";
+                       compatible = "ti,omap4-gpio";
                        ti,hwmods = "gpio1";
+                       ti,gpio-always-on;
                        reg = <0x48032000 0x1000>;
-                       interrupts = <97>;
+                       interrupts = <96>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                };
 
                gpio2: gpio@4804c000 {
-                       compatible = "ti,omap3-gpio";
+                       compatible = "ti,omap4-gpio";
                        ti,hwmods = "gpio2";
+                       ti,gpio-always-on;
                        reg = <0x4804c000 0x1000>;
-                       interrupts = <99>;
+                       interrupts = <98>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                };
 
                gpmc: gpmc@50000000 {
index 127608d79033e32e76143b0707918134f6450283..c4659a979c41387558f8ea660d119568645acbcb 100644 (file)
                                              "wkupclk", "refclk",
                                              "div-clk", "phy-div";
                                #phy-cells = <0>;
-                               ti,hwmods = "pcie1-phy";
                        };
 
                        pcie2_phy: pciephy@4a095000 {
                                              "wkupclk", "refclk",
                                              "div-clk", "phy-div";
                                #phy-cells = <0>;
-                               ti,hwmods = "pcie2-phy";
                                status = "disabled";
                        };
                };
index f4f78c40b56450160ba566cc25bcb7fac9ca489b..3fdc84fddb70d06a2ae26db02fc2c9c9f6d8ee63 100644 (file)
@@ -92,6 +92,8 @@
                        ti,hwmods = "aes";
                        reg = <0x480c5000 0x50>;
                        interrupts = <0>;
+                       dmas = <&sdma 65 &sdma 66>;
+                       dma-names = "tx", "rx";
                };
 
                prm: prm@48306000 {
                        ti,hwmods = "sham";
                        reg = <0x480c3000 0x64>;
                        interrupts = <49>;
+                       dmas = <&sdma 69>;
+                       dma-names = "rx";
                };
 
                smartreflex_core: smartreflex@480cb000 {
index d771f687a13b50abad6f87ad6fb476e1d4377da9..eccc78d3220ba2e59e2994dfb4f5f20d3d3f7a91 100644 (file)
                        "mac_clk_rx", "mac_clk_tx",
                        "clk_mac_ref", "clk_mac_refout",
                        "aclk_mac", "pclk_mac";
+               status = "disabled";
        };
 
        usb_host0_ehci: usb@ff500000 {
index 9d87609567523efd0c0e75033e8bf9e73079d6bd..d9176e6061731b7c42ee792338fab3509482c603 100644 (file)
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <0xfff01000 0x1000>;
-                       interrupts = <0 156 4>;
+                       interrupts = <0 155 4>;
                        num-cs = <4>;
                        clocks = <&spi_m_clk>;
                        status = "disabled";
index ab7891c43231de889f40efb79f7a064577c2d6ec..75742f8f96f3d1800d803a7fc2b2d3bd352413f5 100644 (file)
        model = "Olimex A10-OLinuXino-LIME";
        compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10";
 
+       cpus {
+               cpu0: cpu@0 {
+                       /*
+                        * The A10-Lime is known to be unstable
+                        * when running at 1008 MHz
+                        */
+                       operating-points = <
+                               /* kHz    uV */
+                               912000  1350000
+                               864000  1300000
+                               624000  1250000
+                               >;
+                       cooling-max-level = <2>;
+               };
+       };
+
        soc@01c00000 {
                emac: ethernet@01c0b000 {
                        pinctrl-names = "default";
index 5c2925831f2038318258003ae6cd3736da71c0de..eebb7853e00bbad39916e804453929a2935cc831 100644 (file)
@@ -75,7 +75,6 @@
                        clock-latency = <244144>; /* 8 32k periods */
                        operating-points = <
                                /* kHz    uV */
-                               1056000 1500000
                                1008000 1400000
                                912000  1350000
                                864000  1300000
@@ -83,7 +82,7 @@
                                >;
                        #cooling-cells = <2>;
                        cooling-min-level = <0>;
-                       cooling-max-level = <4>;
+                       cooling-max-level = <3>;
                };
        };
 
index f8818f1edbbef27f16adadc139b8e46ae49823ad..883cb4873688f2f80ce3036ca2a60f338aa4bb67 100644 (file)
@@ -47,7 +47,6 @@
                        clock-latency = <244144>; /* 8 32k periods */
                        operating-points = <
                                /* kHz    uV */
-                               1104000 1500000
                                1008000 1400000
                                912000  1350000
                                864000  1300000
@@ -57,7 +56,7 @@
                                >;
                        #cooling-cells = <2>;
                        cooling-min-level = <0>;
-                       cooling-max-level = <6>;
+                       cooling-max-level = <5>;
                };
        };
 
index 3a8530b79f1c46200d2b7ee8bbe343198c7106d8..fdd181792b4beeb553ab55e275361d6bd6ecab07 100644 (file)
                        clock-latency = <244144>; /* 8 32k periods */
                        operating-points = <
                                /* kHz    uV */
-                               1008000 1450000
                                960000  1400000
                                912000  1400000
                                864000  1300000
                                >;
                        #cooling-cells = <2>;
                        cooling-min-level = <0>;
-                       cooling-max-level = <7>;
+                       cooling-max-level = <6>;
                };
 
                cpu@1 {
index 2a2f4d56e4c85ea599b295a10d09c2bf922bdc79..25f1beea453e0252234b3c62d7f53de75e6ed3f3 100644 (file)
@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void)
                return kasprintf(GFP_KERNEL, "OMAP4");
        else if (soc_is_omap54xx())
                return kasprintf(GFP_KERNEL, "OMAP5");
+       else if (soc_is_am33xx() || soc_is_am335x())
+               return kasprintf(GFP_KERNEL, "AM33xx");
        else if (soc_is_am43xx())
                return kasprintf(GFP_KERNEL, "AM43xx");
        else if (soc_is_dra7xx())
index 0eecd83c624e3d98573d542efb834e7371cffc00..89a7c06570d3adf248a00bf07c3c3aff631d23cf 100644 (file)
@@ -11,6 +11,7 @@
  *  it under the terms of the GNU General Public License version 2 as
  *  published by the Free Software Foundation.
  */
+#include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -40,7 +41,6 @@
 #define ICHP_VAL_IRQ           (1 << 31)
 #define ICHP_IRQ(i)            (((i) >> 16) & 0x7fff)
 #define IPR_VALID              (1 << 31)
-#define IRQ_BIT(n)             (((n) - PXA_IRQ(0)) & 0x1f)
 
 #define MAX_INTERNAL_IRQS      128
 
@@ -51,6 +51,7 @@
 static void __iomem *pxa_irq_base;
 static int pxa_internal_irq_nr;
 static bool cpu_has_ipr;
+static struct irq_domain *pxa_irq_domain;
 
 static inline void __iomem *irq_base(int i)
 {
@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i)
 void pxa_mask_irq(struct irq_data *d)
 {
        void __iomem *base = irq_data_get_irq_chip_data(d);
+       irq_hw_number_t irq = irqd_to_hwirq(d);
        uint32_t icmr = __raw_readl(base + ICMR);
 
-       icmr &= ~(1 << IRQ_BIT(d->irq));
+       icmr &= ~BIT(irq & 0x1f);
        __raw_writel(icmr, base + ICMR);
 }
 
 void pxa_unmask_irq(struct irq_data *d)
 {
        void __iomem *base = irq_data_get_irq_chip_data(d);
+       irq_hw_number_t irq = irqd_to_hwirq(d);
        uint32_t icmr = __raw_readl(base + ICMR);
 
-       icmr |= 1 << IRQ_BIT(d->irq);
+       icmr |= BIT(irq & 0x1f);
        __raw_writel(icmr, base + ICMR);
 }
 
@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
        } while (1);
 }
 
-void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
+                      irq_hw_number_t hw)
 {
-       int irq, i, n;
+       void __iomem *base = irq_base(hw / 32);
 
-       BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+       /* initialize interrupt priority */
+       if (cpu_has_ipr)
+               __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
+
+       irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
+                                handle_level_irq);
+       irq_set_chip_data(virq, base);
+       set_irq_flags(virq, IRQF_VALID);
+
+       return 0;
+}
+
+static struct irq_domain_ops pxa_irq_ops = {
+       .map    = pxa_irq_map,
+       .xlate  = irq_domain_xlate_onecell,
+};
+
+static __init void
+pxa_init_irq_common(struct device_node *node, int irq_nr,
+                   int (*fn)(struct irq_data *, unsigned int))
+{
+       int n;
 
        pxa_internal_irq_nr = irq_nr;
-       cpu_has_ipr = !cpu_is_pxa25x();
-       pxa_irq_base = io_p2v(0x40d00000);
+       pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
+                                              PXA_IRQ(0), 0,
+                                              &pxa_irq_ops, NULL);
+       if (!pxa_irq_domain)
+               panic("Unable to add PXA IRQ domain\n");
+       irq_set_default_host(pxa_irq_domain);
 
        for (n = 0; n < irq_nr; n += 32) {
                void __iomem *base = irq_base(n >> 5);
 
                __raw_writel(0, base + ICMR);   /* disable all IRQs */
                __raw_writel(0, base + ICLR);   /* all IRQs are IRQ, not FIQ */
-               for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
-                       /* initialize interrupt priority */
-                       if (cpu_has_ipr)
-                               __raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
-
-                       irq = PXA_IRQ(i);
-                       irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
-                                                handle_level_irq);
-                       irq_set_chip_data(irq, base);
-                       set_irq_flags(irq, IRQF_VALID);
-               }
        }
-
        /* only unmasked interrupts kick us out of idle */
        __raw_writel(1, irq_base(0) + ICCR);
 
        pxa_internal_irq_chip.irq_set_wake = fn;
 }
 
+void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+{
+       BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+
+       pxa_irq_base = io_p2v(0x40d00000);
+       cpu_has_ipr = !cpu_is_pxa25x();
+       pxa_init_irq_common(NULL, irq_nr, fn);
+}
+
 #ifdef CONFIG_PM
 static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
 static unsigned long saved_ipr[MAX_INTERNAL_IRQS];
@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = {
 };
 
 #ifdef CONFIG_OF
-static struct irq_domain *pxa_irq_domain;
-
-static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
-                      irq_hw_number_t hw)
-{
-       void __iomem *base = irq_base(hw / 32);
-
-       /* initialize interrupt priority */
-       if (cpu_has_ipr)
-               __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
-
-       irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
-                                handle_level_irq);
-       irq_set_chip_data(hw, base);
-       set_irq_flags(hw, IRQF_VALID);
-
-       return 0;
-}
-
-static struct irq_domain_ops pxa_irq_ops = {
-       .map    = pxa_irq_map,
-       .xlate  = irq_domain_xlate_onecell,
-};
-
 static const struct of_device_id intc_ids[] __initconst = {
        { .compatible = "marvell,pxa-intc", },
        {}
@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
 {
        struct device_node *node;
        struct resource res;
-       int n, ret;
+       int ret;
 
        node = of_find_matching_node(NULL, intc_ids);
        if (!node) {
@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
                return;
        }
 
-       pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0,
-                                              &pxa_irq_ops, NULL);
-       if (!pxa_irq_domain)
-               panic("Unable to add PXA IRQ domain\n");
-
-       irq_set_default_host(pxa_irq_domain);
-
-       for (n = 0; n < pxa_internal_irq_nr; n += 32) {
-               void __iomem *base = irq_base(n >> 5);
-
-               __raw_writel(0, base + ICMR);   /* disable all IRQs */
-               __raw_writel(0, base + ICLR);   /* all IRQs are IRQ, not FIQ */
-       }
-
-       /* only unmasked interrupts kick us out of idle */
-       __raw_writel(1, irq_base(0) + ICCR);
-
-       pxa_internal_irq_chip.irq_set_wake = fn;
+       pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
 }
 #endif /* CONFIG_OF */
index 205f9bf3821e30236d579021f09d1157a6c3fee5..ac2ae5c71ab45b7428440ee8925b121533ba9509 100644 (file)
@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = {
 };
 
 static struct platform_device can_regulator_device = {
-       .name   = "reg-fixed-volage",
+       .name   = "reg-fixed-voltage",
        .id     = 0,
        .dev    = {
                .platform_data  = &can_regulator_pdata,
index a77604fbaf257acef8e2c546dd267a5e2ef501af..81502b90dd9130240bd716d4bfb866b9d5ac5efe 100644 (file)
@@ -1,10 +1,12 @@
 menuconfig ARCH_SUNXI
        bool "Allwinner SoCs" if ARCH_MULTI_V7
        select ARCH_REQUIRE_GPIOLIB
+       select ARCH_HAS_RESET_CONTROLLER
        select CLKSRC_MMIO
        select GENERIC_IRQ_CHIP
        select PINCTRL
        select SUN4I_TIMER
+       select RESET_CONTROLLER
 
 if ARCH_SUNXI
 
@@ -20,10 +22,8 @@ config MACH_SUN5I
 config MACH_SUN6I
        bool "Allwinner A31 (sun6i) SoCs support"
        default ARCH_SUNXI
-       select ARCH_HAS_RESET_CONTROLLER
        select ARM_GIC
        select MFD_SUN6I_PRCM
-       select RESET_CONTROLLER
        select SUN5I_HSTIMER
 
 config MACH_SUN7I
@@ -37,16 +37,12 @@ config MACH_SUN7I
 config MACH_SUN8I
        bool "Allwinner A23 (sun8i) SoCs support"
        default ARCH_SUNXI
-       select ARCH_HAS_RESET_CONTROLLER
        select ARM_GIC
        select MFD_SUN6I_PRCM
-       select RESET_CONTROLLER
 
 config MACH_SUN9I
        bool "Allwinner (sun9i) SoCs support"
        default ARCH_SUNXI
-       select ARCH_HAS_RESET_CONTROLLER
        select ARM_GIC
-       select RESET_CONTROLLER
 
 endif
index db10169a08de78980d0ad17b71224f96bbfb1d5c..8ca94d379bc35f2020dea0ea3e708b0fbb0bb827 100644 (file)
@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        const struct of_device_id *match;
        const struct dmtimer_platform_data *pdata;
+       int ret;
 
        match = of_match_device(of_match_ptr(omap_timer_match), dev);
        pdata = match ? match->data : dev->platform_data;
@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
        }
 
        if (!timer->reserved) {
-               pm_runtime_get_sync(dev);
+               ret = pm_runtime_get_sync(dev);
+               if (ret < 0) {
+                       dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
+                               __func__);
+                       goto err_get_sync;
+               }
                __omap_dm_timer_init_regs(timer);
                pm_runtime_put(dev);
        }
@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
        dev_dbg(dev, "Device Probed.\n");
 
        return 0;
+
+err_get_sync:
+       pm_runtime_put_noidle(dev);
+       pm_runtime_disable(dev);
+       return ret;
 }
 
 /**
@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
                }
        spin_unlock_irqrestore(&dm_timer_lock, flags);
 
+       pm_runtime_disable(&pdev->dev);
+
        return ret;
 }
 
index ea2b5666a16f5a3e6b194559e21fc2538f58b7f0..c9b89efe0f562a9dd7cd1a60126f42cb8948ff23 100644 (file)
@@ -8,7 +8,7 @@
  */
 
        /* SoC fixed clocks */
-       soc_uartclk: refclk72738khz {
+       soc_uartclk: refclk7273800hz {
                compatible = "fixed-clock";
                #clock-cells = <0>;
                clock-frequency = <7273800>;
index cb9593079f29763c34f7e68fa89737355ac03adb..d8c25b7b18fbf42ddc66ab888fc22c530d752d15 100644 (file)
@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
        __ret; \
 })
 
-#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
-       cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
-                               o1, o2, n1, n2)
+#define _protect_cmpxchg_local(pcp, o, n)                      \
+({                                                             \
+       typeof(*raw_cpu_ptr(&(pcp))) __ret;                     \
+       preempt_disable();                                      \
+       __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);       \
+       preempt_enable();                                       \
+       __ret;                                                  \
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)          \
+({                                                                     \
+       int __ret;                                                      \
+       preempt_disable();                                              \
+       __ret = cmpxchg_double_local(   raw_cpu_ptr(&(ptr1)),           \
+                                       raw_cpu_ptr(&(ptr2)),           \
+                                       o1, o2, n1, n2);                \
+       preempt_enable();                                               \
+       __ret;                                                          \
+})
 
 #define cmpxchg64(ptr,o,n)             cmpxchg((ptr),(o),(n))
 #define cmpxchg64_local(ptr,o,n)       cmpxchg_local((ptr),(o),(n))
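
(Context for the hunk above: this_cpu_cmpxchg() may be called with preemption enabled, so the raw_cpu_ptr() lookup and the cmpxchg must not be split across a CPU migration; the preempt_disable()/preempt_enable() pair added here pins both to one CPU. A minimal, hypothetical usage sketch follows; the per-CPU variable and values are made up, only DEFINE_PER_CPU() and this_cpu_cmpxchg() are real interfaces.)

#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(unsigned long, example_state);

/*
 * Move this CPU's state from 'old' to 'new' only if it still holds 'old'.
 * Without the added preemption protection, the task could migrate between
 * computing raw_cpu_ptr() and performing the cmpxchg.
 */
static bool example_try_transition(unsigned long old, unsigned long new)
{
	return this_cpu_cmpxchg(example_state, old, new) == old;
}
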
index a9eee33dfa62dc031ab8262c275eba79f8609bac..101a42bde728a8b9547bca989b696d473dcd7e42 100644 (file)
@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
        unsigned int cpu = smp_processor_id();
 
+       /*
+        * init_mm.pgd does not contain any user mappings and it is always
+        * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
+        */
+       if (next == &init_mm) {
+               cpu_set_reserved_ttbr0();
+               return;
+       }
+
        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
                check_and_switch_context(next, tsk);
 }
index 09da25bc596fd0bdccdf03e94b37a4c81e0cc633..4fde8c1df97ffb46d9d2039a11cf574d05ed5a92 100644 (file)
@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
        return ret;
 }
 
+#define _percpu_read(pcp)                                              \
+({                                                                     \
+       typeof(pcp) __retval;                                           \
+       preempt_disable();                                              \
+       __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),      \
+                                             sizeof(pcp));             \
+       preempt_enable();                                               \
+       __retval;                                                       \
+})
+
+#define _percpu_write(pcp, val)                                                \
+do {                                                                   \
+       preempt_disable();                                              \
+       __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),       \
+                               sizeof(pcp));                           \
+       preempt_enable();                                               \
+} while(0)                                                             \
+
+#define _pcp_protect(operation, pcp, val)                      \
+({                                                             \
+       typeof(pcp) __retval;                                   \
+       preempt_disable();                                      \
+       __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),  \
+                                         (val), sizeof(pcp));  \
+       preempt_enable();                                       \
+       __retval;                                               \
+})
+
 #define _percpu_add(pcp, val) \
-       __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+       _pcp_protect(__percpu_add, pcp, val)
 
-#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
 
 #define _percpu_and(pcp, val) \
-       __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+       _pcp_protect(__percpu_and, pcp, val)
 
 #define _percpu_or(pcp, val) \
-       __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
-
-#define _percpu_read(pcp) (typeof(pcp))        \
-       (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
-
-#define _percpu_write(pcp, val) \
-       __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
+       _pcp_protect(__percpu_or, pcp, val)
 
 #define _percpu_xchg(pcp, val) (typeof(pcp)) \
-       (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
+       _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
 
 #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
 #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
index 9359e504844258b12d2efd9c752859813210dd1a..d5779b0ec5730a01963a4a86940d1f5eb500084e 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_METAG_IO_H
 
 #include <linux/types.h>
+#include <asm/pgtable-bits.h>
 
 #define IO_SPACE_LIMIT  0
 
diff --git a/arch/metag/include/asm/pgtable-bits.h b/arch/metag/include/asm/pgtable-bits.h
new file mode 100644 (file)
index 0000000..25ba672
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Meta page table definitions.
+ */
+
+#ifndef _METAG_PGTABLE_BITS_H
+#define _METAG_PGTABLE_BITS_H
+
+#include <asm/metag_mem.h>
+
+/*
+ * Definitions for MMU descriptors
+ *
+ * These are the hardware bits in the MMCU pte entries.
+ * Derived from the Meta toolkit headers.
+ */
+#define _PAGE_PRESENT          MMCU_ENTRY_VAL_BIT
+#define _PAGE_WRITE            MMCU_ENTRY_WR_BIT
+#define _PAGE_PRIV             MMCU_ENTRY_PRIV_BIT
+/* Write combine bit - this can cause writes to occur out of order */
+#define _PAGE_WR_COMBINE       MMCU_ENTRY_WRC_BIT
+/* Sys coherent bit - this bit is never used by Linux */
+#define _PAGE_SYS_COHERENT     MMCU_ENTRY_SYS_BIT
+#define _PAGE_ALWAYS_ZERO_1    0x020
+#define _PAGE_CACHE_CTRL0      0x040
+#define _PAGE_CACHE_CTRL1      0x080
+#define _PAGE_ALWAYS_ZERO_2    0x100
+#define _PAGE_ALWAYS_ZERO_3    0x200
+#define _PAGE_ALWAYS_ZERO_4    0x400
+#define _PAGE_ALWAYS_ZERO_5    0x800
+
+/* These are software bits that we stuff into the gaps in the hardware
+ * pte entries that are not used.  Note, these DO get stored in the actual
+ * hardware, but the hardware just does not use them.
+ */
+#define _PAGE_ACCESSED         _PAGE_ALWAYS_ZERO_1
+#define _PAGE_DIRTY            _PAGE_ALWAYS_ZERO_2
+
+/* Pages owned, and protected by, the kernel. */
+#define _PAGE_KERNEL           _PAGE_PRIV
+
+/* No cacheing of this page */
+#define _PAGE_CACHE_WIN0       (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
+/* burst cacheing - good for data streaming */
+#define _PAGE_CACHE_WIN1       (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
+/* One cache way per thread */
+#define _PAGE_CACHE_WIN2       (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
+/* Full on cacheing */
+#define _PAGE_CACHE_WIN3       (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
+
+#define _PAGE_CACHEABLE                (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
+
+/* which bits are used for cache control ... */
+#define _PAGE_CACHE_MASK       (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
+                                _PAGE_WR_COMBINE)
+
+/* This is a mask of the bits that pte_modify is allowed to change. */
+#define _PAGE_CHG_MASK         (PAGE_MASK)
+
+#define _PAGE_SZ_SHIFT         1
+#define _PAGE_SZ_4K            (0x0)
+#define _PAGE_SZ_8K            (0x1 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_16K           (0x2 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_32K           (0x3 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_64K           (0x4 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_128K          (0x5 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_256K          (0x6 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_512K          (0x7 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_1M            (0x8 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_2M            (0x9 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_4M            (0xa << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_MASK          (0xf << _PAGE_SZ_SHIFT)
+
+#if defined(CONFIG_PAGE_SIZE_4K)
+#define _PAGE_SZ               (_PAGE_SZ_4K)
+#elif defined(CONFIG_PAGE_SIZE_8K)
+#define _PAGE_SZ               (_PAGE_SZ_8K)
+#elif defined(CONFIG_PAGE_SIZE_16K)
+#define _PAGE_SZ               (_PAGE_SZ_16K)
+#endif
+#define _PAGE_TABLE            (_PAGE_SZ | _PAGE_PRESENT)
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_8K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_16K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_32K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_64K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_128K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_256K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_512K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
+# define _PAGE_SZHUGE          (_PAGE_SZ_1M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
+# define _PAGE_SZHUGE          (_PAGE_SZ_2M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
+# define _PAGE_SZHUGE          (_PAGE_SZ_4M)
+#endif
+
+#endif /* _METAG_PGTABLE_BITS_H */
index d0604c0a87022231f37157531e1175894545ee4a..ffa3a3a2ecadda8bed7cf5e7b1508cd98c43abf8 100644 (file)
@@ -5,6 +5,7 @@
 #ifndef _METAG_PGTABLE_H
 #define _METAG_PGTABLE_H
 
+#include <asm/pgtable-bits.h>
 #include <asm-generic/pgtable-nopmd.h>
 
 /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
 #define VMALLOC_END            0x7FFFFFFF
 #endif
 
-/*
- * Definitions for MMU descriptors
- *
- * These are the hardware bits in the MMCU pte entries.
- * Derived from the Meta toolkit headers.
- */
-#define _PAGE_PRESENT          MMCU_ENTRY_VAL_BIT
-#define _PAGE_WRITE            MMCU_ENTRY_WR_BIT
-#define _PAGE_PRIV             MMCU_ENTRY_PRIV_BIT
-/* Write combine bit - this can cause writes to occur out of order */
-#define _PAGE_WR_COMBINE       MMCU_ENTRY_WRC_BIT
-/* Sys coherent bit - this bit is never used by Linux */
-#define _PAGE_SYS_COHERENT     MMCU_ENTRY_SYS_BIT
-#define _PAGE_ALWAYS_ZERO_1    0x020
-#define _PAGE_CACHE_CTRL0      0x040
-#define _PAGE_CACHE_CTRL1      0x080
-#define _PAGE_ALWAYS_ZERO_2    0x100
-#define _PAGE_ALWAYS_ZERO_3    0x200
-#define _PAGE_ALWAYS_ZERO_4    0x400
-#define _PAGE_ALWAYS_ZERO_5    0x800
-
-/* These are software bits that we stuff into the gaps in the hardware
- * pte entries that are not used.  Note, these DO get stored in the actual
- * hardware, but the hardware just does not use them.
- */
-#define _PAGE_ACCESSED         _PAGE_ALWAYS_ZERO_1
-#define _PAGE_DIRTY            _PAGE_ALWAYS_ZERO_2
-
-/* Pages owned, and protected by, the kernel. */
-#define _PAGE_KERNEL           _PAGE_PRIV
-
-/* No cacheing of this page */
-#define _PAGE_CACHE_WIN0       (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
-/* burst cacheing - good for data streaming */
-#define _PAGE_CACHE_WIN1       (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
-/* One cache way per thread */
-#define _PAGE_CACHE_WIN2       (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
-/* Full on cacheing */
-#define _PAGE_CACHE_WIN3       (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
-
-#define _PAGE_CACHEABLE                (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
-
-/* which bits are used for cache control ... */
-#define _PAGE_CACHE_MASK       (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
-                                _PAGE_WR_COMBINE)
-
-/* This is a mask of the bits that pte_modify is allowed to change. */
-#define _PAGE_CHG_MASK         (PAGE_MASK)
-
-#define _PAGE_SZ_SHIFT         1
-#define _PAGE_SZ_4K            (0x0)
-#define _PAGE_SZ_8K            (0x1 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_16K           (0x2 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_32K           (0x3 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_64K           (0x4 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_128K          (0x5 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_256K          (0x6 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_512K          (0x7 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_1M            (0x8 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_2M            (0x9 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_4M            (0xa << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_MASK          (0xf << _PAGE_SZ_SHIFT)
-
-#if defined(CONFIG_PAGE_SIZE_4K)
-#define _PAGE_SZ               (_PAGE_SZ_4K)
-#elif defined(CONFIG_PAGE_SIZE_8K)
-#define _PAGE_SZ               (_PAGE_SZ_8K)
-#elif defined(CONFIG_PAGE_SIZE_16K)
-#define _PAGE_SZ               (_PAGE_SZ_16K)
-#endif
-#define _PAGE_TABLE            (_PAGE_SZ | _PAGE_PRESENT)
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_8K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_16K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_32K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_64K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_128K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_256K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_512K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
-# define _PAGE_SZHUGE          (_PAGE_SZ_1M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
-# define _PAGE_SZHUGE          (_PAGE_SZ_2M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
-# define _PAGE_SZHUGE          (_PAGE_SZ_4M)
-#endif
-
 /*
  * The Linux memory management assumes a three-level page table setup. On
  * Meta, we use that, but "fold" the mid level into the top-level page
index 601f1a476c38cffa4e471dd4c5c42d33344d7902..779dcae4717a64ec1ed670fa0e38d6cd3a67b4a7 100644 (file)
@@ -417,6 +417,7 @@ config MIPS_MALTA
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_HAS_CPU_MIPS32_R2
        select SYS_HAS_CPU_MIPS32_R3_5
+       select SYS_HAS_CPU_MIPS32_R5
        select SYS_HAS_CPU_MIPS32_R6
        select SYS_HAS_CPU_MIPS64_R1
        select SYS_HAS_CPU_MIPS64_R2
@@ -426,6 +427,7 @@ config MIPS_MALTA
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_64BIT_KERNEL
        select SYS_SUPPORTS_BIG_ENDIAN
+       select SYS_SUPPORTS_HIGHMEM
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_SUPPORTS_MICROMIPS
        select SYS_SUPPORTS_MIPS_CMP
@@ -1640,6 +1642,33 @@ config CPU_MIPS32_3_5_EVA
          One of its primary benefits is an increase in the maximum size
          of lowmem (up to 3GB). If unsure, say 'N' here.
 
+config CPU_MIPS32_R5_FEATURES
+       bool "MIPS32 Release 5 Features"
+       depends on SYS_HAS_CPU_MIPS32_R5
+       depends on CPU_MIPS32_R2
+       help
+         Choose this option to build a kernel for release 2 or later of the
+         MIPS32 architecture including features from release 5 such as
+         support for Extended Physical Addressing (XPA).
+
+config CPU_MIPS32_R5_XPA
+       bool "Extended Physical Addressing (XPA)"
+       depends on CPU_MIPS32_R5_FEATURES
+       depends on !EVA
+       depends on !PAGE_SIZE_4KB
+       depends on SYS_SUPPORTS_HIGHMEM
+       select XPA
+       select HIGHMEM
+       select ARCH_PHYS_ADDR_T_64BIT
+       default n
+       help
+         Choose this option if you want to enable the Extended Physical
+         Addressing (XPA) on your MIPS32 core (such as P5600 series). The
+         benefit is to increase physical addressing equal to or greater
+         than 40 bits. Note that this has the side effect of turning on
+         64-bit addressing which in turn makes the PTEs 64-bit in size.
+         If unsure, say 'N' here.
+
 if CPU_LOONGSON2F
 config CPU_NOP_WORKAROUNDS
        bool
@@ -1743,6 +1772,9 @@ config SYS_HAS_CPU_MIPS32_R2
 config SYS_HAS_CPU_MIPS32_R3_5
        bool
 
+config SYS_HAS_CPU_MIPS32_R5
+       bool
+
 config SYS_HAS_CPU_MIPS32_R6
        bool
 
@@ -1880,6 +1912,9 @@ config CPU_MIPSR6
 config EVA
        bool
 
+config XPA
+       bool
+
 config SYS_SUPPORTS_32BIT_KERNEL
        bool
 config SYS_SUPPORTS_64BIT_KERNEL
@@ -2116,7 +2151,7 @@ config MIPSR2_TO_R6_EMULATOR
        help
          Choose this option if you want to run non-R6 MIPS userland code.
          Even if you say 'Y' here, the emulator will still be disabled by
-         default. You can enable it using the 'mipsr2emul' kernel option.
+         default. You can enable it using the 'mipsr2emu' kernel option.
          The only reason this is a build-time option is to save ~14K from the
          final kernel image.
 comment "MIPS R2-to-R6 emulator is only available for UP kernels"
@@ -2186,7 +2221,7 @@ config MIPS_CMP
 
 config MIPS_CPS
        bool "MIPS Coherent Processing System support"
-       depends on SYS_SUPPORTS_MIPS_CPS
+       depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
        select MIPS_CM
        select MIPS_CPC
        select MIPS_CPS_PM if HOTPLUG_CPU
index e644ac2d6501d1182c4cc8f12cef19c61529936a..198fd8d120a0cbc1e6e9333b44937b4fdc24578c 100644 (file)
@@ -197,11 +197,17 @@ endif
 # Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
 # Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
 # been fixed properly.
-mips-cflags                            := "$(cflags-y)"
-cflags-$(CONFIG_CPU_HAS_SMARTMIPS)     += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn
-cflags-$(CONFIG_CPU_MICROMIPS)         += $(call cc-option,$(mips-cflags),-mmicromips)
+mips-cflags                            := $(cflags-y)
+ifeq ($(CONFIG_CPU_HAS_SMARTMIPS),y)
+smartmips-ase                          := $(call cc-option-yn,$(mips-cflags) -msmartmips)
+cflags-$(smartmips-ase)                        += -msmartmips -Wa,--no-warn
+endif
+ifeq ($(CONFIG_CPU_MICROMIPS),y)
+micromips-ase                          := $(call cc-option-yn,$(mips-cflags) -mmicromips)
+cflags-$(micromips-ase)                        += -mmicromips
+endif
 ifeq ($(CONFIG_CPU_HAS_MSA),y)
-toolchain-msa                          := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa)
+toolchain-msa                          := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(comma)-mmsa)
 cflags-$(toolchain-msa)                        += -DTOOLCHAIN_SUPPORTS_MSA
 endif
 
index 41b9736c3c054830376c27e77d112fea4ec2f8d3..bd56415f2f3b46f8d13826b43396aa2f898475d7 100644 (file)
@@ -235,8 +235,8 @@ static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
        }
 
        if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0 &&
-           bcm47xx_nvram_getenv("boardtype", buf2, sizeof(buf2)) >= 0) {
-               for (e2 = bcm47xx_board_list_boot_hw; e2->value1; e2++) {
+           bcm47xx_nvram_getenv("boardnum", buf2, sizeof(buf2)) >= 0) {
+               for (e2 = bcm47xx_board_list_hw_version_num; e2->value1; e2++) {
                        if (!strstarts(buf1, e2->value1) &&
                            !strcmp(buf2, e2->value2))
                                return &e2->board;
index e1f27d653f603d17413b04aa0e9f540f10cf504a..7019e2967009e98e6f6191c1e86969d4663a957f 100644 (file)
@@ -17,7 +17,6 @@
 #include <bcm63xx_cpu.h>
 #include <bcm63xx_io.h>
 #include <bcm63xx_regs.h>
-#include <bcm63xx_gpio.h>
 
 void __init prom_init(void)
 {
@@ -53,9 +52,6 @@ void __init prom_init(void)
        reg &= ~mask;
        bcm_perf_writel(reg, PERF_CKCTL_REG);
 
-       /* register gpiochip */
-       bcm63xx_gpio_init();
-
        /* do low level board init */
        board_prom_init();
 
index 6660c7ddf87b9cf37baf6100878b52626dcf9d47..240fb4ffa55c8625ef26354e29cff25c58f63b68 100644 (file)
@@ -20,6 +20,7 @@
 #include <bcm63xx_cpu.h>
 #include <bcm63xx_regs.h>
 #include <bcm63xx_io.h>
+#include <bcm63xx_gpio.h>
 
 void bcm63xx_machine_halt(void)
 {
@@ -160,6 +161,9 @@ void __init plat_mem_setup(void)
 
 int __init bcm63xx_register_devices(void)
 {
+       /* register gpiochip */
+       bcm63xx_gpio_init();
+
        return board_register_devices();
 }
 
index 7d8987818ccf51ed6aa82463fbe3de5dd599557e..d8960d46417b07ec5e3b0603b2f934efe4c1f54c 100644 (file)
@@ -306,7 +306,7 @@ void __init plat_swiotlb_setup(void)
                swiotlbsize = 64 * (1<<20);
        }
 #endif
-#ifdef CONFIG_USB_OCTEON_OHCI
+#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
        /* OCTEON II ohci is only 32-bit. */
        if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
                swiotlbsize = 64 * (1<<20);
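For context, an illustrative sketch not taken from this patch (example_probe_dma is a made-up helper): a device that can only master 32-bit addresses, such as the platform OHCI referenced above, keeps a 32-bit DMA mask and lets the DMA API bounce through swiotlb for memory above 4GB, which is why a larger swiotlb pool is reserved here.

	/* Sketch: 32-bit-only device relies on swiotlb above 4GB. */
	#include <linux/dma-mapping.h>

	static int example_probe_dma(struct device *dev)
	{
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	}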
index e41c56e375b136969289bcc233409d42b393b231..1e38f0e1ea3e68c8b3c77e9ff7a798a2bc0bfe1d 100644 (file)
 #define __ASM_ASM_EVA_H
 
 #ifndef __ASSEMBLY__
+
+/* Kernel variants */
+
+#define kernel_cache(op, base)         "cache " op ", " base "\n"
+#define kernel_ll(reg, addr)           "ll " reg ", " addr "\n"
+#define kernel_sc(reg, addr)           "sc " reg ", " addr "\n"
+#define kernel_lw(reg, addr)           "lw " reg ", " addr "\n"
+#define kernel_lwl(reg, addr)          "lwl " reg ", " addr "\n"
+#define kernel_lwr(reg, addr)          "lwr " reg ", " addr "\n"
+#define kernel_lh(reg, addr)           "lh " reg ", " addr "\n"
+#define kernel_lb(reg, addr)           "lb " reg ", " addr "\n"
+#define kernel_lbu(reg, addr)          "lbu " reg ", " addr "\n"
+#define kernel_sw(reg, addr)           "sw " reg ", " addr "\n"
+#define kernel_swl(reg, addr)          "swl " reg ", " addr "\n"
+#define kernel_swr(reg, addr)          "swr " reg ", " addr "\n"
+#define kernel_sh(reg, addr)           "sh " reg ", " addr "\n"
+#define kernel_sb(reg, addr)           "sb " reg ", " addr "\n"
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define kernel_sd(reg, addr)           user_sw(reg, addr)
+#define kernel_ld(reg, addr)           user_lw(reg, addr)
+#else
+#define kernel_sd(reg, addr)           "sd " reg", " addr "\n"
+#define kernel_ld(reg, addr)           "ld " reg", " addr "\n"
+#endif /* CONFIG_32BIT */
+
 #ifdef CONFIG_EVA
 
 #define __BUILD_EVA_INSN(insn, reg, addr)                              \
 
 #else
 
-#define user_cache(op, base)           "cache " op ", " base "\n"
-#define user_ll(reg, addr)             "ll " reg ", " addr "\n"
-#define user_sc(reg, addr)             "sc " reg ", " addr "\n"
-#define user_lw(reg, addr)             "lw " reg ", " addr "\n"
-#define user_lwl(reg, addr)            "lwl " reg ", " addr "\n"
-#define user_lwr(reg, addr)            "lwr " reg ", " addr "\n"
-#define user_lh(reg, addr)             "lh " reg ", " addr "\n"
-#define user_lb(reg, addr)             "lb " reg ", " addr "\n"
-#define user_lbu(reg, addr)            "lbu " reg ", " addr "\n"
-#define user_sw(reg, addr)             "sw " reg ", " addr "\n"
-#define user_swl(reg, addr)            "swl " reg ", " addr "\n"
-#define user_swr(reg, addr)            "swr " reg ", " addr "\n"
-#define user_sh(reg, addr)             "sh " reg ", " addr "\n"
-#define user_sb(reg, addr)             "sb " reg ", " addr "\n"
+#define user_cache(op, base)           kernel_cache(op, base)
+#define user_ll(reg, addr)             kernel_ll(reg, addr)
+#define user_sc(reg, addr)             kernel_sc(reg, addr)
+#define user_lw(reg, addr)             kernel_lw(reg, addr)
+#define user_lwl(reg, addr)            kernel_lwl(reg, addr)
+#define user_lwr(reg, addr)            kernel_lwr(reg, addr)
+#define user_lh(reg, addr)             kernel_lh(reg, addr)
+#define user_lb(reg, addr)             kernel_lb(reg, addr)
+#define user_lbu(reg, addr)            kernel_lbu(reg, addr)
+#define user_sw(reg, addr)             kernel_sw(reg, addr)
+#define user_swl(reg, addr)            kernel_swl(reg, addr)
+#define user_swr(reg, addr)            kernel_swr(reg, addr)
+#define user_sh(reg, addr)             kernel_sh(reg, addr)
+#define user_sb(reg, addr)             kernel_sb(reg, addr)
 
 #ifdef CONFIG_32BIT
-/*
- * No 'sd' or 'ld' instructions in 32-bit but the code will
- * do the correct thing
- */
-#define user_sd(reg, addr)             user_sw(reg, addr)
-#define user_ld(reg, addr)             user_lw(reg, addr)
+#define user_sd(reg, addr)             kernel_sw(reg, addr)
+#define user_ld(reg, addr)             kernel_lw(reg, addr)
 #else
-#define user_sd(reg, addr)             "sd " reg", " addr "\n"
-#define user_ld(reg, addr)             "ld " reg", " addr "\n"
+#define user_sd(reg, addr)             kernel_sd(reg, addr)
+#define user_ld(reg, addr)             kernel_ld(reg, addr)
 #endif /* CONFIG_32BIT */
 
 #endif /* CONFIG_EVA */
 
 #else /* __ASSEMBLY__ */
 
+#define kernel_cache(op, base)         cache op, base
+#define kernel_ll(reg, addr)           ll reg, addr
+#define kernel_sc(reg, addr)           sc reg, addr
+#define kernel_lw(reg, addr)           lw reg, addr
+#define kernel_lwl(reg, addr)          lwl reg, addr
+#define kernel_lwr(reg, addr)          lwr reg, addr
+#define kernel_lh(reg, addr)           lh reg, addr
+#define kernel_lb(reg, addr)           lb reg, addr
+#define kernel_lbu(reg, addr)          lbu reg, addr
+#define kernel_sw(reg, addr)           sw reg, addr
+#define kernel_swl(reg, addr)          swl reg, addr
+#define kernel_swr(reg, addr)          swr reg, addr
+#define kernel_sh(reg, addr)           sh reg, addr
+#define kernel_sb(reg, addr)           sb reg, addr
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define kernel_sd(reg, addr)           user_sw(reg, addr)
+#define kernel_ld(reg, addr)           user_lw(reg, addr)
+#else
+#define kernel_sd(reg, addr)           sd reg, addr
+#define kernel_ld(reg, addr)           ld reg, addr
+#endif /* CONFIG_32BIT */
+
 #ifdef CONFIG_EVA
 
 #define __BUILD_EVA_INSN(insn, reg, addr)                      \
 #define user_sd(reg, addr)             user_sw(reg, addr)
 #else
 
-#define user_cache(op, base)           cache op, base
-#define user_ll(reg, addr)             ll reg, addr
-#define user_sc(reg, addr)             sc reg, addr
-#define user_lw(reg, addr)             lw reg, addr
-#define user_lwl(reg, addr)            lwl reg, addr
-#define user_lwr(reg, addr)            lwr reg, addr
-#define user_lh(reg, addr)             lh reg, addr
-#define user_lb(reg, addr)             lb reg, addr
-#define user_lbu(reg, addr)            lbu reg, addr
-#define user_sw(reg, addr)             sw reg, addr
-#define user_swl(reg, addr)            swl reg, addr
-#define user_swr(reg, addr)            swr reg, addr
-#define user_sh(reg, addr)             sh reg, addr
-#define user_sb(reg, addr)             sb reg, addr
+#define user_cache(op, base)           kernel_cache(op, base)
+#define user_ll(reg, addr)             kernel_ll(reg, addr)
+#define user_sc(reg, addr)             kernel_sc(reg, addr)
+#define user_lw(reg, addr)             kernel_lw(reg, addr)
+#define user_lwl(reg, addr)            kernel_lwl(reg, addr)
+#define user_lwr(reg, addr)            kernel_lwr(reg, addr)
+#define user_lh(reg, addr)             kernel_lh(reg, addr)
+#define user_lb(reg, addr)             kernel_lb(reg, addr)
+#define user_lbu(reg, addr)            kernel_lbu(reg, addr)
+#define user_sw(reg, addr)             kernel_sw(reg, addr)
+#define user_swl(reg, addr)            kernel_swl(reg, addr)
+#define user_swr(reg, addr)            kernel_swr(reg, addr)
+#define user_sh(reg, addr)             kernel_sh(reg, addr)
+#define user_sb(reg, addr)             kernel_sb(reg, addr)
 
 #ifdef CONFIG_32BIT
-/*
- * No 'sd' or 'ld' instructions in 32-bit but the code will
- * do the correct thing
- */
-#define user_sd(reg, addr)             user_sw(reg, addr)
-#define user_ld(reg, addr)             user_lw(reg, addr)
+#define user_sd(reg, addr)             kernel_sw(reg, addr)
+#define user_ld(reg, addr)             kernel_lw(reg, addr)
 #else
-#define user_sd(reg, addr)             sd reg, addr
-#define user_ld(reg, addr)             ld reg, addr
+#define user_sd(reg, addr)             kernel_sd(reg, addr)
+#define user_ld(reg, addr)             kernel_ld(reg, addr)
 #endif /* CONFIG_32BIT */
 
 #endif /* CONFIG_EVA */
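To make the intent of the new kernel_* variants concrete, here is a minimal usage sketch (assumed, not taken from the patch; load_kernel_word is a made-up helper): the same inline-asm template can now address kernel memory explicitly even in an EVA kernel, while the user_* forms keep targeting the user address space.

	/* Sketch: load a word through the kernel mapping regardless of EVA. */
	#include <asm/asm-eva.h>

	static inline unsigned int load_kernel_word(const unsigned int *p)
	{
		unsigned int val;

		__asm__ __volatile__(
			kernel_lw("%0", "0(%1)")
			: "=r" (val)
			: "r" (p)
			: "memory");
		return val;
	}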
index e08381a37f8b67a644df6df19a9884c11dbf3e39..37d5cf9b5706fa87e2e38796363c7fbe8b510527 100644 (file)
  *  - flush_icache_all() flush the entire instruction cache
  *  - flush_data_cache_page() flushes a page from the data cache
  */
+
+/*
+ * This flag is used to indicate that the page pointed to by a pte
+ * is dirty and requires cleaning before returning it to the user.
+ */
+#define PG_dcache_dirty                        PG_arch_1
+
+#define Page_dcache_dirty(page)                \
+       test_bit(PG_dcache_dirty, &(page)->flags)
+#define SetPageDcacheDirty(page)       \
+       set_bit(PG_dcache_dirty, &(page)->flags)
+#define ClearPageDcacheDirty(page)     \
+       clear_bit(PG_dcache_dirty, &(page)->flags)
+
 extern void (*flush_cache_all)(void);
 extern void (*__flush_cache_all)(void);
 extern void (*flush_cache_mm)(struct mm_struct *mm);
@@ -41,9 +55,10 @@ extern void __flush_dcache_page(struct page *page);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 static inline void flush_dcache_page(struct page *page)
 {
-       if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
+       if (cpu_has_dc_aliases)
                __flush_dcache_page(page);
-
+       else if (!cpu_has_ic_fills_f_dc)
+               SetPageDcacheDirty(page);
 }
 
 #define flush_dcache_mmap_lock(mapping)                do { } while (0)
@@ -61,6 +76,9 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 static inline void flush_icache_page(struct vm_area_struct *vma,
        struct page *page)
 {
+       if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) &&
+           Page_dcache_dirty(page))
+               __flush_dcache_page(page);
 }
 
 extern void (*flush_icache_range)(unsigned long start, unsigned long end);
@@ -95,19 +113,6 @@ extern void (*flush_icache_all)(void);
 extern void (*local_flush_data_cache_page)(void * addr);
 extern void (*flush_data_cache_page)(unsigned long addr);
 
-/*
- * This flag is used to indicate that the page pointed to by a pte
- * is dirty and requires cleaning before returning it to the user.
- */
-#define PG_dcache_dirty                        PG_arch_1
-
-#define Page_dcache_dirty(page)                \
-       test_bit(PG_dcache_dirty, &(page)->flags)
-#define SetPageDcacheDirty(page)       \
-       set_bit(PG_dcache_dirty, &(page)->flags)
-#define ClearPageDcacheDirty(page)     \
-       clear_bit(PG_dcache_dirty, &(page)->flags)
-
 /* Run kernel code uncached, useful for cache probing functions. */
 unsigned long run_uncached(void *func);
 
index fc2ad332541c2978786b15fc45dd793ff65acffe..5aeaf19c26b0fa5db9282951d237fbe9738fc1f0 100644 (file)
 # endif
 #endif
 
+#ifndef cpu_has_xpa
+#define cpu_has_xpa            (cpu_data[0].options & MIPS_CPU_XPA)
+#endif
 #ifndef cpu_has_vtag_icache
 #define cpu_has_vtag_icache    (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
 #endif
 /* MIPSR2 and MIPSR6 have a lot of similarities */
 #define cpu_has_mips_r2_r6     (cpu_has_mips_r2 | cpu_has_mips_r6)
 
+/*
+ * cpu_has_mips_r2_exec_hazard - return if IHB is required on current processor
+ *
+ * Returns non-zero value if the current processor implementation requires
+ * an IHB instruction to deal with an instruction hazard as per MIPS R2
+ * architecture specification, zero otherwise.
+ */
 #ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
+#define cpu_has_mips_r2_exec_hazard                                    \
+({                                                                     \
+       int __res;                                                      \
+                                                                       \
+       switch (current_cpu_type()) {                                   \
+       case CPU_M14KC:                                                 \
+       case CPU_74K:                                                   \
+       case CPU_1074K:                                                 \
+       case CPU_PROAPTIV:                                              \
+       case CPU_P5600:                                                 \
+       case CPU_M5150:                                                 \
+       case CPU_QEMU_GENERIC:                                          \
+       case CPU_CAVIUM_OCTEON:                                         \
+       case CPU_CAVIUM_OCTEON_PLUS:                                    \
+       case CPU_CAVIUM_OCTEON2:                                        \
+       case CPU_CAVIUM_OCTEON3:                                        \
+               __res = 0;                                              \
+               break;                                                  \
+                                                                       \
+       default:                                                        \
+               __res = 1;                                              \
+       }                                                               \
+                                                                       \
+       __res;                                                          \
+})
 #endif
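For illustration, a sketch of how this refined feature test is typically consumed (the wrapper itself is made up; instruction_hazard() is the existing inline helper from <asm/hazards.h>): the barrier is only emitted on cores that actually need it.

	/* Sketch: skip the hazard barrier on cores known to be hazard-free. */
	#include <asm/cpu-features.h>
	#include <asm/hazards.h>

	static inline void maybe_instruction_hazard(void)
	{
		if (cpu_has_mips_r2_exec_hazard)
			instruction_hazard();
	}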
 
 /*
index fd2e893e9d9f7d0a28fd48b04d3ea10be8b090a5..e3adca1d0b9956060845a803d77fad2a1f21f75f 100644 (file)
@@ -377,7 +377,8 @@ enum cpu_type_enum {
 #define MIPS_CPU_MAAR          0x400000000ull /* MAAR(I) registers are present */
 #define MIPS_CPU_FRE           0x800000000ull /* FRE & UFE bits implemented */
 #define MIPS_CPU_RW_LLB                0x1000000000ull /* LLADDR/LLB writes are allowed */
-#define MIPS_CPU_CDMM          0x2000000000ull /* CPU has Common Device Memory Map */
+#define MIPS_CPU_XPA           0x2000000000ull /* CPU supports Extended Physical Addressing */
+#define MIPS_CPU_CDMM          0x4000000000ull /* CPU has Common Device Memory Map */
 
 /*
  * CPU ASE encodings
index 612cf519bd889db63f9265eb2dde151f551ba7ac..9a74248e782132ff34ef9c7faf2669e389ba139e 100644 (file)
@@ -297,6 +297,9 @@ do {                                                                        \
        if (personality(current->personality) != PER_LINUX)             \
                set_personality(PER_LINUX);                             \
                                                                        \
+       clear_thread_flag(TIF_HYBRID_FPREGS);                           \
+       set_thread_flag(TIF_32BIT_FPREGS);                              \
+                                                                       \
        mips_set_personality_fp(state);                                 \
                                                                        \
        current->thread.abi = &mips_abi;                                \
@@ -324,6 +327,8 @@ do {                                                                        \
        do {                                                            \
                set_thread_flag(TIF_32BIT_REGS);                        \
                set_thread_flag(TIF_32BIT_ADDR);                        \
+               clear_thread_flag(TIF_HYBRID_FPREGS);                   \
+               set_thread_flag(TIF_32BIT_FPREGS);                      \
                                                                        \
                mips_set_personality_fp(state);                         \
                                                                        \
index fa1f3cfbae8d70aa9dc0db696282bb1f8ed8f76f..d68e685cde6032729a5632171e163f102f6a4f42 100644 (file)
@@ -50,7 +50,6 @@
 #define cpu_has_mips32r2       0
 #define cpu_has_mips64r1       0
 #define cpu_has_mips64r2       1
-#define cpu_has_mips_r2_exec_hazard 0
 #define cpu_has_dsp            0
 #define cpu_has_dsp2           0
 #define cpu_has_mipsmt         0
index 64ba56a028431eb89750e16731345dcf5bcb4e72..1884609741a83d6bbd0af7cf5162cce051bac3ba 100644 (file)
@@ -11,9 +11,6 @@
 
 #include <linux/pci.h>
 
-/* Some PCI cards require delays when accessing config space. */
-#define PCI_CONFIG_SPACE_DELAY 10000
-
 /*
  * The physical memory base mapped by BAR1.  256MB at the end of the
  * first 4GB.
index a6be006b6f754072d52ff026bb77efb7f0028d31..7d56686c0e62402f6f27e79c88286f5081d9a812 100644 (file)
@@ -105,13 +105,16 @@ static inline void pmd_clear(pmd_t *pmdp)
 
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #define pte_page(x)            pfn_to_page(pte_pfn(x))
-#define pte_pfn(x)             ((unsigned long)((x).pte_high >> 6))
+#define pte_pfn(x)             (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
 static inline pte_t
 pfn_pte(unsigned long pfn, pgprot_t prot)
 {
        pte_t pte;
-       pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
-       pte.pte_low = pgprot_val(prot);
+
+       pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
+                               (pgprot_val(prot) & ~_PFNX_MASK);
+       pte.pte_high = (pfn << _PFN_SHIFT) |
+                               (pgprot_val(prot) & ~_PFN_MASK);
        return pte;
 }
 
@@ -166,9 +169,9 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
 /* Swap entries must have VALID and GLOBAL bits cleared. */
-#define __swp_type(x)                  (((x).val >> 2) & 0x1f)
-#define __swp_offset(x)                         ((x).val >> 7)
-#define __swp_entry(type,offset)       ((swp_entry_t)  { ((type) << 2) | ((offset) << 7) })
+#define __swp_type(x)                  (((x).val >> 4) & 0x1f)
+#define __swp_offset(x)                         ((x).val >> 9)
+#define __swp_entry(type,offset)       ((swp_entry_t)  { ((type) << 4) | ((offset) << 9) })
 #define __pte_to_swp_entry(pte)                ((swp_entry_t) { (pte).pte_high })
 #define __swp_entry_to_pte(x)          ((pte_t) { 0, (x).val })
 
index 91747c282bb3fb3f1f6560cb5a722089aac9741e..18ae5ddef118c071e1240486e90f08be3e4b0871 100644 (file)
 /*
  * The following bits are implemented by the TLB hardware
  */
-#define _PAGE_GLOBAL_SHIFT     0
+#define _PAGE_NO_EXEC_SHIFT    0
+#define _PAGE_NO_EXEC          (1 << _PAGE_NO_EXEC_SHIFT)
+#define _PAGE_NO_READ_SHIFT    (_PAGE_NO_EXEC_SHIFT + 1)
+#define _PAGE_NO_READ          (1 << _PAGE_NO_READ_SHIFT)
+#define _PAGE_GLOBAL_SHIFT     (_PAGE_NO_READ_SHIFT + 1)
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
 #define _PAGE_VALID_SHIFT      (_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID            (1 << _PAGE_VALID_SHIFT)
@@ -49,7 +53,7 @@
 /*
  * The following bits are implemented in software
  */
-#define _PAGE_PRESENT_SHIFT    (_CACHE_SHIFT + 3)
+#define _PAGE_PRESENT_SHIFT    (24)
 #define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
 #define _PAGE_READ_SHIFT       (_PAGE_PRESENT_SHIFT + 1)
 #define _PAGE_READ             (1 << _PAGE_READ_SHIFT)
 
 #define _PFN_SHIFT             (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
 
+/*
+ * Bits for extended EntryLo0/EntryLo1 registers
+ */
+#define _PFNX_MASK             0xffffff
+
 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
 /*
 
 #else
 /*
- * When using the RI/XI bit support, we have 13 bits of flags below
- * the physical address. The RI/XI bits are placed such that a SRL 5
- * can strip off the software bits, then a ROTR 2 can move the RI/XI
- * into bits [63:62]. This also limits physical address to 56 bits,
- * which is more than we need right now.
+ * Below are the "Normal" R4K cases
  */
 
 /*
  */
 #define _PAGE_PRESENT_SHIFT    0
 #define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT       (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
+/* R2 or later cores check for RI/XI support to determine _PAGE_READ */
+#ifdef CONFIG_CPU_MIPSR2
+#define _PAGE_WRITE_SHIFT      (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_WRITE            (1 << _PAGE_WRITE_SHIFT)
+#else
+#define _PAGE_READ_SHIFT       (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_READ             (1 << _PAGE_READ_SHIFT)
 #define _PAGE_WRITE_SHIFT      (_PAGE_READ_SHIFT + 1)
 #define _PAGE_WRITE            (1 << _PAGE_WRITE_SHIFT)
+#endif
 #define _PAGE_ACCESSED_SHIFT   (_PAGE_WRITE_SHIFT + 1)
 #define _PAGE_ACCESSED         (1 << _PAGE_ACCESSED_SHIFT)
 #define _PAGE_MODIFIED_SHIFT   (_PAGE_ACCESSED_SHIFT + 1)
 #define _PAGE_MODIFIED         (1 << _PAGE_MODIFIED_SHIFT)
 
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-/* huge tlb page */
+#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+/* Huge TLB page */
 #define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT + 1)
 #define _PAGE_HUGE             (1 << _PAGE_HUGE_SHIFT)
 #define _PAGE_SPLITTING_SHIFT  (_PAGE_HUGE_SHIFT + 1)
 #define _PAGE_SPLITTING                (1 << _PAGE_SPLITTING_SHIFT)
+
+/* Only R2 or newer cores have the XI bit */
+#ifdef CONFIG_CPU_MIPSR2
+#define _PAGE_NO_EXEC_SHIFT    (_PAGE_SPLITTING_SHIFT + 1)
 #else
-#define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT)
-#define _PAGE_HUGE             ({BUG(); 1; })  /* Dummy value */
-#define _PAGE_SPLITTING_SHIFT  (_PAGE_HUGE_SHIFT)
-#define _PAGE_SPLITTING                ({BUG(); 1; })  /* Dummy value */
-#endif
+#define _PAGE_GLOBAL_SHIFT     (_PAGE_SPLITTING_SHIFT + 1)
+#define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
+#endif /* CONFIG_CPU_MIPSR2 */
 
-/* Page cannot be executed */
-#define _PAGE_NO_EXEC_SHIFT    (cpu_has_rixi ? _PAGE_SPLITTING_SHIFT + 1 : _PAGE_SPLITTING_SHIFT)
-#define _PAGE_NO_EXEC          ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_EXEC_SHIFT; })
+#endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */
 
-/* Page cannot be read */
-#define _PAGE_NO_READ_SHIFT    (cpu_has_rixi ? _PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT)
-#define _PAGE_NO_READ          ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_READ_SHIFT; })
+#ifdef CONFIG_CPU_MIPSR2
+/* XI - page cannot be executed */
+#ifndef _PAGE_NO_EXEC_SHIFT
+#define _PAGE_NO_EXEC_SHIFT    (_PAGE_MODIFIED_SHIFT + 1)
+#endif
+#define _PAGE_NO_EXEC          (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
+
+/* RI - page cannot be read */
+#define _PAGE_READ_SHIFT       (_PAGE_NO_EXEC_SHIFT + 1)
+#define _PAGE_READ             (cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
+#define _PAGE_NO_READ_SHIFT    _PAGE_READ_SHIFT
+#define _PAGE_NO_READ          (cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
 
 #define _PAGE_GLOBAL_SHIFT     (_PAGE_NO_READ_SHIFT + 1)
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
+
+#else  /* !CONFIG_CPU_MIPSR2 */
+#define _PAGE_GLOBAL_SHIFT     (_PAGE_MODIFIED_SHIFT + 1)
+#define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
+#endif /* CONFIG_CPU_MIPSR2 */
+
 #define _PAGE_VALID_SHIFT      (_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID            (1 << _PAGE_VALID_SHIFT)
 #define _PAGE_DIRTY_SHIFT      (_PAGE_VALID_SHIFT + 1)
 
 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
 
+#ifndef _PAGE_NO_EXEC
+#define _PAGE_NO_EXEC          0
+#endif
+#ifndef _PAGE_NO_READ
+#define _PAGE_NO_READ          0
+#endif
+
 #define _PAGE_SILENT_READ      _PAGE_VALID
 #define _PAGE_SILENT_WRITE     _PAGE_DIRTY
 
 #define _PFN_MASK              (~((1 << (_PFN_SHIFT)) - 1))
 
-#ifndef _PAGE_NO_READ
-#define _PAGE_NO_READ ({BUG(); 0; })
-#define _PAGE_NO_READ_SHIFT ({BUG(); 0; })
-#endif
-#ifndef _PAGE_NO_EXEC
-#define _PAGE_NO_EXEC ({BUG(); 0; })
-#endif
+/*
+ * The final layouts of the PTE bits are:
+ *
+ *   64-bit, R1 or earlier:     CCC D V G [S H] M A W R P
+ *   32-bit, R1 or earlier:     CCC D V G M A W R P
+ *   64-bit, R2 or later:       CCC D V G RI/R XI [S H] M A W P
+ *   32-bit, R2 or later:       CCC D V G RI/R XI M A W P
+ */
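With the shifts now resolved at build time instead of via cpu_has_rixi at run time, relations between the bits could even be checked statically; a hypothetical sketch (not in the patch; check_pte_bit_layout is a made-up helper):

	/* Sketch: the VALID and DIRTY bits always sit directly above GLOBAL. */
	#include <linux/bug.h>
	#include <linux/init.h>
	#include <asm/pgtable-bits.h>

	static void __init check_pte_bit_layout(void)
	{
		BUILD_BUG_ON(_PAGE_VALID_SHIFT != _PAGE_GLOBAL_SHIFT + 1);
		BUILD_BUG_ON(_PAGE_DIRTY_SHIFT != _PAGE_VALID_SHIFT + 1);
	}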
 
 
 #ifndef __ASSEMBLY__
  */
 static inline uint64_t pte_to_entrylo(unsigned long pte_val)
 {
+#ifdef CONFIG_CPU_MIPSR2
        if (cpu_has_rixi) {
                int sa;
 #ifdef CONFIG_32BIT
@@ -186,6 +221,7 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
                return (pte_val >> _PAGE_GLOBAL_SHIFT) |
                        ((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
        }
+#endif
 
        return pte_val >> _PAGE_GLOBAL_SHIFT;
 }
@@ -245,7 +281,7 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
 #define _CACHE_UNCACHED_ACCELERATED    (7<<_CACHE_SHIFT)
 #endif
 
-#define __READABLE     (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
+#define __READABLE     (_PAGE_SILENT_READ | _PAGE_READ | _PAGE_ACCESSED)
 #define __WRITEABLE    (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
 
 #define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED |      \
index bef782c4a44bd33b20f24dda9422f401e2324acc..819af9d057a8ba65fa1cb1e39ee5462556513aba 100644 (file)
@@ -24,17 +24,17 @@ struct mm_struct;
 struct vm_area_struct;
 
 #define PAGE_NONE      __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
-#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
+#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
                                 _page_cachable_default)
-#define PAGE_COPY      __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
-                                (cpu_has_rixi ?  _PAGE_NO_EXEC : 0) | _page_cachable_default)
-#define PAGE_READONLY  __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
+#define PAGE_COPY      __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
+                                _page_cachable_default)
+#define PAGE_READONLY  __pgprot(_PAGE_PRESENT | _PAGE_READ | \
                                 _page_cachable_default)
 #define PAGE_KERNEL    __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
                                 _PAGE_GLOBAL | _page_cachable_default)
 #define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
                                 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
-#define PAGE_USERIO    __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
+#define PAGE_USERIO    __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                 _page_cachable_default)
 #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
                        __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
@@ -127,13 +127,9 @@ do {                                                                       \
        }                                                               \
 } while(0)
 
-
-extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
-       pte_t pteval);
-
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
-#define pte_none(pte)          (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
+#define pte_none(pte)          (!(((pte).pte_high) & ~_PAGE_GLOBAL))
 #define pte_present(pte)       ((pte).pte_low & _PAGE_PRESENT)
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
@@ -142,18 +138,17 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
        smp_wmb();
        ptep->pte_low = pte.pte_low;
 
-       if (pte.pte_low & _PAGE_GLOBAL) {
+       if (pte.pte_high & _PAGE_GLOBAL) {
                pte_t *buddy = ptep_buddy(ptep);
                /*
                 * Make sure the buddy is global too (if it's !none,
                 * it better already be global)
                 */
-               if (pte_none(*buddy)) {
-                       buddy->pte_low  |= _PAGE_GLOBAL;
+               if (pte_none(*buddy))
                        buddy->pte_high |= _PAGE_GLOBAL;
-               }
        }
 }
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -161,8 +156,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 
        htw_stop();
        /* Preserve global status for the pair */
-       if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
-               null.pte_low = null.pte_high = _PAGE_GLOBAL;
+       if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
+               null.pte_high = _PAGE_GLOBAL;
 
        set_pte_at(mm, addr, ptep, null);
        htw_start();
@@ -192,6 +187,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
        }
 #endif
 }
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -242,21 +238,21 @@ static inline int pte_young(pte_t pte)    { return pte.pte_low & _PAGE_ACCESSED; }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-       pte.pte_low  &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
+       pte.pte_low  &= ~_PAGE_WRITE;
        pte.pte_high &= ~_PAGE_SILENT_WRITE;
        return pte;
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-       pte.pte_low  &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
+       pte.pte_low  &= ~_PAGE_MODIFIED;
        pte.pte_high &= ~_PAGE_SILENT_WRITE;
        return pte;
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-       pte.pte_low  &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
+       pte.pte_low  &= ~_PAGE_ACCESSED;
        pte.pte_high &= ~_PAGE_SILENT_READ;
        return pte;
 }
@@ -264,30 +260,24 @@ static inline pte_t pte_mkold(pte_t pte)
 static inline pte_t pte_mkwrite(pte_t pte)
 {
        pte.pte_low |= _PAGE_WRITE;
-       if (pte.pte_low & _PAGE_MODIFIED) {
-               pte.pte_low  |= _PAGE_SILENT_WRITE;
+       if (pte.pte_low & _PAGE_MODIFIED)
                pte.pte_high |= _PAGE_SILENT_WRITE;
-       }
        return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
        pte.pte_low |= _PAGE_MODIFIED;
-       if (pte.pte_low & _PAGE_WRITE) {
-               pte.pte_low  |= _PAGE_SILENT_WRITE;
+       if (pte.pte_low & _PAGE_WRITE)
                pte.pte_high |= _PAGE_SILENT_WRITE;
-       }
        return pte;
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
        pte.pte_low |= _PAGE_ACCESSED;
-       if (pte.pte_low & _PAGE_READ) {
-               pte.pte_low  |= _PAGE_SILENT_READ;
+       if (pte.pte_low & _PAGE_READ)
                pte.pte_high |= _PAGE_SILENT_READ;
-       }
        return pte;
 }
 #else
@@ -332,13 +322,13 @@ static inline pte_t pte_mkdirty(pte_t pte)
 static inline pte_t pte_mkyoung(pte_t pte)
 {
        pte_val(pte) |= _PAGE_ACCESSED;
-       if (cpu_has_rixi) {
-               if (!(pte_val(pte) & _PAGE_NO_READ))
-                       pte_val(pte) |= _PAGE_SILENT_READ;
-       } else {
-               if (pte_val(pte) & _PAGE_READ)
-                       pte_val(pte) |= _PAGE_SILENT_READ;
-       }
+#ifdef CONFIG_CPU_MIPSR2
+       if (!(pte_val(pte) & _PAGE_NO_READ))
+               pte_val(pte) |= _PAGE_SILENT_READ;
+       else
+#endif
+       if (pte_val(pte) & _PAGE_READ)
+               pte_val(pte) |= _PAGE_SILENT_READ;
        return pte;
 }
 
@@ -391,10 +381,10 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-       pte.pte_low  &= _PAGE_CHG_MASK;
+       pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
        pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
-       pte.pte_low  |= pgprot_val(newprot);
-       pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
+       pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
+       pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
        return pte;
 }
 #else
@@ -407,12 +397,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
        pte_t pte);
+extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
+       pte_t pte);
 
 static inline void update_mmu_cache(struct vm_area_struct *vma,
        unsigned long address, pte_t *ptep)
 {
        pte_t pte = *ptep;
        __update_tlb(vma, address, pte);
+       __update_cache(vma, address, pte);
 }
 
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
@@ -534,13 +527,13 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
 {
        pmd_val(pmd) |= _PAGE_ACCESSED;
 
-       if (cpu_has_rixi) {
-               if (!(pmd_val(pmd) & _PAGE_NO_READ))
-                       pmd_val(pmd) |= _PAGE_SILENT_READ;
-       } else {
-               if (pmd_val(pmd) & _PAGE_READ)
-                       pmd_val(pmd) |= _PAGE_SILENT_READ;
-       }
+#ifdef CONFIG_CPU_MIPSR2
+       if (!(pmd_val(pmd) & _PAGE_NO_READ))
+               pmd_val(pmd) |= _PAGE_SILENT_READ;
+       else
+#endif
+       if (pmd_val(pmd) & _PAGE_READ)
+               pmd_val(pmd) |= _PAGE_SILENT_READ;
 
        return pmd;
 }
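To illustrate the split between the two PTE halves after this change, here is a sketch for the 64-bit-PTE (32-bit kernel) configuration (make_writable_pte is a made-up helper, not from the patch): the software bits stay in pte_low while the hardware "silent" bits now live only in pte_high.

	/* Sketch: software bits in pte_low, hardware dirty bit in pte_high. */
	#include <asm/pgtable.h>

	static pte_t make_writable_pte(unsigned long pfn)
	{
		pte_t pte = pfn_pte(pfn, PAGE_SHARED);

		pte = pte_mkdirty(pte_mkwrite(pte));
		/* pte_low:  _PAGE_WRITE | _PAGE_MODIFIED (plus protection bits)
		 * pte_high: pfn and _PAGE_SILENT_WRITE                         */
		return pte;
	}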
index 1b22d2da88a1ec1b76e42dfde3cbff5cb5b422c2..38902bf97adc46577bf5bc61721a410dc530560c 100644 (file)
@@ -12,6 +12,8 @@
 #ifndef _ASM_R4KCACHE_H
 #define _ASM_R4KCACHE_H
 
+#include <linux/stringify.h>
+
 #include <asm/asm.h>
 #include <asm/cacheops.h>
 #include <asm/compiler.h>
@@ -344,7 +346,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
        "       cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"     \
        "       cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"     \
        "       cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"     \
-       "       addiu $1, $0, 0x100                     \n"     \
+       "       "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"     \
        "       cache %1, 0x000($1); cache %1, 0x010($1)\n"     \
        "       cache %1, 0x020($1); cache %1, 0x030($1)\n"     \
        "       cache %1, 0x040($1); cache %1, 0x050($1)\n"     \
@@ -368,17 +370,17 @@ static inline void invalidate_tcache_page(unsigned long addr)
        "       cache %1, 0x040(%0); cache %1, 0x060(%0)\n"     \
        "       cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"     \
        "       cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
+       "       "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"     \
        "       cache %1, 0x000($1); cache %1, 0x020($1)\n"     \
        "       cache %1, 0x040($1); cache %1, 0x060($1)\n"     \
        "       cache %1, 0x080($1); cache %1, 0x0a0($1)\n"     \
        "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"     \
-       "       addiu $1, $1, 0x100\n"                          \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
        "       cache %1, 0x000($1); cache %1, 0x020($1)\n"     \
        "       cache %1, 0x040($1); cache %1, 0x060($1)\n"     \
        "       cache %1, 0x080($1); cache %1, 0x0a0($1)\n"     \
        "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"     \
-       "       addiu $1, $1, 0x100\n"                          \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100\n"      \
        "       cache %1, 0x000($1); cache %1, 0x020($1)\n"     \
        "       cache %1, 0x040($1); cache %1, 0x060($1)\n"     \
        "       cache %1, 0x080($1); cache %1, 0x0a0($1)\n"     \
@@ -396,25 +398,25 @@ static inline void invalidate_tcache_page(unsigned long addr)
        "       .set noat\n"                                    \
        "       cache %1, 0x000(%0); cache %1, 0x040(%0)\n"     \
        "       cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
+       "       "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"     \
        "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
        "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
        "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
        "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
        "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
        "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
        "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
        "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
        "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
        "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
        "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
        "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
        "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
        "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
        "       .set pop\n"                                     \
@@ -429,39 +431,38 @@ static inline void invalidate_tcache_page(unsigned long addr)
        "       .set mips64r6\n"                                \
        "       .set noat\n"                                    \
        "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
-       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
-       "       addiu $1, %0, 0x100\n"                          \
+       "       "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
+       "       "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)\n"     \
        "       .set pop\n"                                     \
                :                                               \
                : "r" (base),                                   \
index b4548690ade9916e1d94e844641a505fad43bea4..1fca2e0793dcbbebd75784b90ad8e2513e0fcd45 100644 (file)
@@ -263,7 +263,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "1:     ll      %1, %2          # arch_read_unlock      \n"
-               "       addiu   %1,                                   \n"
+               "       addiu   %1, -1                                  \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
                : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
index 73ab840f13bd05a7579b51eb15dc3a927e81a448..e36515dcd3b29efcdb0014c7dfd4541805eb4e4c 100644 (file)
@@ -600,6 +600,10 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
                c->options |= MIPS_CPU_MAAR;
        if (config5 & MIPS_CONF5_LLB)
                c->options |= MIPS_CPU_RW_LLB;
+#ifdef CONFIG_XPA
+       if (config5 & MIPS_CONF5_MVH)
+               c->options |= MIPS_CPU_XPA;
+#endif
 
        return config5 & MIPS_CONF_M;
 }
index af41ba6db9601d16540b945b0112e130bd9eb845..7791840cf22c0f7c058d32f3abb722eb132f90f8 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
+#include <asm/compiler.h>
 #include <asm/regdef.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
@@ -185,7 +186,7 @@ syscall_exit_work:
  * For C code use the inline version named instruction_hazard().
  */
 LEAF(mips_ihb)
-       .set    mips32r2
+       .set    MIPS_ISA_LEVEL_RAW
        jr.hb   ra
        nop
        END(mips_ihb)
index 130af7d26a9c5de2fe2ec5fdd5969d9bd4a5f42a..298b2b773d12ba8f875d2d0e253ba19a60f93e2c 100644 (file)
@@ -120,6 +120,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (cpu_has_msa)        seq_printf(m, "%s", " msa");
        if (cpu_has_eva)        seq_printf(m, "%s", " eva");
        if (cpu_has_htw)        seq_printf(m, "%s", " htw");
+       if (cpu_has_xpa)        seq_printf(m, "%s", " xpa");
        seq_printf(m, "\n");
 
        if (cpu_has_mmips) {
index bed7590e475f4366cace78f5c141859c2889e27c..d5589bedd0a402a7c5126af6fd3e6127e5f91c4b 100644 (file)
@@ -88,6 +88,12 @@ static void __init cps_smp_setup(void)
 
        /* Make core 0 coherent with everything */
        write_gcr_cl_coherence(0xff);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+       /* If we have an FPU, enroll ourselves in the FPU-full mask */
+       if (cpu_has_fpu)
+               cpu_set(0, mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
 }
 
 static void __init cps_prepare_cpus(unsigned int max_cpus)
index 27af5634a69ba0cc02971d01d933fb14eec47e4d..af84bef0c90de4bc65e14669dca132ebb7147846 100644 (file)
@@ -107,10 +107,11 @@ static u32 unaligned_action;
 extern void show_registers(struct pt_regs *regs);
 
 #ifdef __BIG_ENDIAN
-#define     LoadHW(addr, value, res)  \
+#define     _LoadHW(addr, value, res, type)  \
+do {                                                        \
                __asm__ __volatile__ (".set\tnoat\n"        \
-                       "1:\t"user_lb("%0", "0(%2)")"\n"    \
-                       "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
+                       "1:\t"type##_lb("%0", "0(%2)")"\n"  \
+                       "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
                        "li\t%1, 0\n"                       \
@@ -125,13 +126,15 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
 
 #ifndef CONFIG_CPU_MIPSR6
-#define     LoadW(addr, value, res)   \
+#define     _LoadW(addr, value, res, type)   \
+do {                                                        \
                __asm__ __volatile__ (                      \
-                       "1:\t"user_lwl("%0", "(%2)")"\n"    \
-                       "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
+                       "1:\t"type##_lwl("%0", "(%2)")"\n"   \
+                       "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
                        "li\t%1, 0\n"                       \
                        "3:\n\t"                            \
                        ".insn\n\t"                         \
@@ -144,21 +147,24 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
+
 #else
 /* MIPSR6 has no lwl instruction */
-#define     LoadW(addr, value, res) \
+#define     _LoadW(addr, value, res, type) \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n"                      \
                        ".set\tnoat\n\t"                    \
-                       "1:"user_lb("%0", "0(%2)")"\n\t"    \
-                       "2:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "1:"type##_lb("%0", "0(%2)")"\n\t"  \
+                       "2:"type##_lbu("$1", "1(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
-                       "3:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "3:"type##_lbu("$1", "2(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
-                       "4:"user_lbu("$1", "3(%2)")"\n\t"   \
+                       "4:"type##_lbu("$1", "3(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
                        "li\t%1, 0\n"                       \
@@ -176,14 +182,17 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t4b, 11b\n\t"             \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
+
 #endif /* CONFIG_CPU_MIPSR6 */
 
-#define     LoadHWU(addr, value, res) \
+#define     _LoadHWU(addr, value, res, type) \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tnoat\n"                      \
-                       "1:\t"user_lbu("%0", "0(%2)")"\n"   \
-                       "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
+                       "1:\t"type##_lbu("%0", "0(%2)")"\n" \
+                       "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
                        "li\t%1, 0\n"                       \
@@ -199,13 +208,15 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
 
 #ifndef CONFIG_CPU_MIPSR6
-#define     LoadWU(addr, value, res)  \
+#define     _LoadWU(addr, value, res, type)  \
+do {                                                        \
                __asm__ __volatile__ (                      \
-                       "1:\t"user_lwl("%0", "(%2)")"\n"    \
-                       "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
+                       "1:\t"type##_lwl("%0", "(%2)")"\n"  \
+                       "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
                        "dsll\t%0, %0, 32\n\t"              \
                        "dsrl\t%0, %0, 32\n\t"              \
                        "li\t%1, 0\n"                       \
@@ -220,9 +231,11 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
 
-#define     LoadDW(addr, value, res)  \
+#define     _LoadDW(addr, value, res)  \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        "1:\tldl\t%0, (%2)\n"               \
                        "2:\tldr\t%0, 7(%2)\n\t"            \
@@ -238,21 +251,24 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
+
 #else
 /* MIPSR6 has no lwl and ldl instructions */
-#define            LoadWU(addr, value, res) \
+#define            _LoadWU(addr, value, res, type) \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n\t"                    \
                        ".set\tnoat\n\t"                    \
-                       "1:"user_lbu("%0", "0(%2)")"\n\t"   \
-                       "2:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "1:"type##_lbu("%0", "0(%2)")"\n\t" \
+                       "2:"type##_lbu("$1", "1(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
-                       "3:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "3:"type##_lbu("$1", "2(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
-                       "4:"user_lbu("$1", "3(%2)")"\n\t"   \
+                       "4:"type##_lbu("$1", "3(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
                        "li\t%1, 0\n"                       \
@@ -270,9 +286,11 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t4b, 11b\n\t"             \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
 
-#define     LoadDW(addr, value, res)  \
+#define     _LoadDW(addr, value, res)  \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n\t"                    \
                        ".set\tnoat\n\t"                    \
@@ -317,16 +335,19 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t8b, 11b\n\t"             \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
+
 #endif /* CONFIG_CPU_MIPSR6 */
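The point of the new "type" argument is that the old macro names can be recreated as thin wrappers, once for kernel and once for EVA user accesses. The real wrapper definitions sit further down in the patch, outside this excerpt; as a hypothetical sketch they would look roughly like:

	/* Hypothetical wrapper sketch; the actual definitions follow later
	 * in the patch. */
	#define LoadW(addr, value, res)   _LoadW(addr, value, res, kernel)
	#define LoadWE(addr, value, res)  _LoadW(addr, value, res, user)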
 
 
-#define     StoreHW(addr, value, res) \
+#define     _StoreHW(addr, value, res, type) \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tnoat\n"                      \
-                       "1:\t"user_sb("%1", "1(%2)")"\n"    \
+                       "1:\t"type##_sb("%1", "1(%2)")"\n"  \
                        "srl\t$1, %1, 0x8\n"                \
-                       "2:\t"user_sb("$1", "0(%2)")"\n"    \
+                       "2:\t"type##_sb("$1", "0(%2)")"\n"  \
                        ".set\tat\n\t"                      \
                        "li\t%0, 0\n"                       \
                        "3:\n\t"                            \
@@ -340,13 +361,15 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=r" (res)                        \
-                       : "r" (value), "r" (addr), "i" (-EFAULT));
+                       : "r" (value), "r" (addr), "i" (-EFAULT));\
+} while(0)
 
 #ifndef CONFIG_CPU_MIPSR6
-#define     StoreW(addr, value, res)  \
+#define     _StoreW(addr, value, res, type)  \
+do {                                                        \
                __asm__ __volatile__ (                      \
-                       "1:\t"user_swl("%1", "(%2)")"\n"    \
-                       "2:\t"user_swr("%1", "3(%2)")"\n\t" \
+                       "1:\t"type##_swl("%1", "(%2)")"\n"  \
+                       "2:\t"type##_swr("%1", "3(%2)")"\n\t"\
                        "li\t%0, 0\n"                       \
                        "3:\n\t"                            \
                        ".insn\n\t"                         \
@@ -359,9 +382,11 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                : "=r" (res)                                \
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               : "r" (value), "r" (addr), "i" (-EFAULT));  \
+} while(0)
 
-#define     StoreDW(addr, value, res) \
+#define     _StoreDW(addr, value, res) \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        "1:\tsdl\t%1,(%2)\n"                \
                        "2:\tsdr\t%1, 7(%2)\n\t"            \
@@ -377,20 +402,23 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                : "=r" (res)                                \
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               : "r" (value), "r" (addr), "i" (-EFAULT));  \
+} while(0)
+
 #else
 /* MIPSR6 has no swl and sdl instructions */
-#define     StoreW(addr, value, res)  \
+#define     _StoreW(addr, value, res, type)  \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n\t"                    \
                        ".set\tnoat\n\t"                    \
-                       "1:"user_sb("%1", "3(%2)")"\n\t"    \
+                       "1:"type##_sb("%1", "3(%2)")"\n\t"  \
                        "srl\t$1, %1, 0x8\n\t"              \
-                       "2:"user_sb("$1", "2(%2)")"\n\t"    \
+                       "2:"type##_sb("$1", "2(%2)")"\n\t"  \
                        "srl\t$1, $1,  0x8\n\t"             \
-                       "3:"user_sb("$1", "1(%2)")"\n\t"    \
+                       "3:"type##_sb("$1", "1(%2)")"\n\t"  \
                        "srl\t$1, $1, 0x8\n\t"              \
-                       "4:"user_sb("$1", "0(%2)")"\n\t"    \
+                       "4:"type##_sb("$1", "0(%2)")"\n\t"  \
                        ".set\tpop\n\t"                     \
                        "li\t%0, 0\n"                       \
                        "10:\n\t"                           \
@@ -407,9 +435,11 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                : "=&r" (res)                               \
                : "r" (value), "r" (addr), "i" (-EFAULT)    \
-               : "memory");
+               : "memory");                                \
+} while(0)
 
 #define     StoreDW(addr, value, res) \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n\t"                    \
                        ".set\tnoat\n\t"                    \
@@ -449,15 +479,18 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                : "=&r" (res)                               \
                : "r" (value), "r" (addr), "i" (-EFAULT)    \
-               : "memory");
+               : "memory");                                \
+} while(0)
+
 #endif /* CONFIG_CPU_MIPSR6 */
 
 #else /* __BIG_ENDIAN */
 
-#define     LoadHW(addr, value, res)  \
+#define     _LoadHW(addr, value, res, type)  \
+do {                                                        \
                __asm__ __volatile__ (".set\tnoat\n"        \
-                       "1:\t"user_lb("%0", "1(%2)")"\n"    \
-                       "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
+                       "1:\t"type##_lb("%0", "1(%2)")"\n"  \
+                       "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
                        "li\t%1, 0\n"                       \
@@ -472,13 +505,15 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
 
 #ifndef CONFIG_CPU_MIPSR6
-#define     LoadW(addr, value, res)   \
+#define     _LoadW(addr, value, res, type)   \
+do {                                                        \
                __asm__ __volatile__ (                      \
-                       "1:\t"user_lwl("%0", "3(%2)")"\n"   \
-                       "2:\t"user_lwr("%0", "(%2)")"\n\t"  \
+                       "1:\t"type##_lwl("%0", "3(%2)")"\n" \
+                       "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
                        "li\t%1, 0\n"                       \
                        "3:\n\t"                            \
                        ".insn\n\t"                         \
@@ -491,21 +526,24 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
+
 #else
 /* MIPSR6 has no lwl instruction */
-#define     LoadW(addr, value, res) \
+#define     _LoadW(addr, value, res, type) \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n"                      \
                        ".set\tnoat\n\t"                    \
-                       "1:"user_lb("%0", "3(%2)")"\n\t"    \
-                       "2:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "1:"type##_lb("%0", "3(%2)")"\n\t"  \
+                       "2:"type##_lbu("$1", "2(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
-                       "3:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "3:"type##_lbu("$1", "1(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
-                       "4:"user_lbu("$1", "0(%2)")"\n\t"   \
+                       "4:"type##_lbu("$1", "0(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
                        "li\t%1, 0\n"                       \
@@ -523,15 +561,18 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t4b, 11b\n\t"             \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
+
 #endif /* CONFIG_CPU_MIPSR6 */
 
 
-#define     LoadHWU(addr, value, res) \
+#define     _LoadHWU(addr, value, res, type) \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tnoat\n"                      \
-                       "1:\t"user_lbu("%0", "1(%2)")"\n"   \
-                       "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
+                       "1:\t"type##_lbu("%0", "1(%2)")"\n" \
+                       "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
                        "li\t%1, 0\n"                       \
@@ -547,13 +588,15 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
 
 #ifndef CONFIG_CPU_MIPSR6
-#define     LoadWU(addr, value, res)  \
+#define     _LoadWU(addr, value, res, type)  \
+do {                                                        \
                __asm__ __volatile__ (                      \
-                       "1:\t"user_lwl("%0", "3(%2)")"\n"   \
-                       "2:\t"user_lwr("%0", "(%2)")"\n\t"  \
+                       "1:\t"type##_lwl("%0", "3(%2)")"\n" \
+                       "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
                        "dsll\t%0, %0, 32\n\t"              \
                        "dsrl\t%0, %0, 32\n\t"              \
                        "li\t%1, 0\n"                       \
@@ -568,9 +611,11 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
 
-#define     LoadDW(addr, value, res)  \
+#define     _LoadDW(addr, value, res)  \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        "1:\tldl\t%0, 7(%2)\n"              \
                        "2:\tldr\t%0, (%2)\n\t"             \
@@ -586,21 +631,24 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
+
 #else
 /* MIPSR6 has not lwl and ldl instructions */
-#define            LoadWU(addr, value, res) \
+#define            _LoadWU(addr, value, res, type) \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n\t"                    \
                        ".set\tnoat\n\t"                    \
-                       "1:"user_lbu("%0", "3(%2)")"\n\t"   \
-                       "2:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "1:"type##_lbu("%0", "3(%2)")"\n\t" \
+                       "2:"type##_lbu("$1", "2(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
-                       "3:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "3:"type##_lbu("$1", "1(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
-                       "4:"user_lbu("$1", "0(%2)")"\n\t"   \
+                       "4:"type##_lbu("$1", "0(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
                        "li\t%1, 0\n"                       \
@@ -618,9 +666,11 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t4b, 11b\n\t"             \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
 
-#define     LoadDW(addr, value, res)  \
+#define     _LoadDW(addr, value, res)  \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n\t"                    \
                        ".set\tnoat\n\t"                    \
@@ -665,15 +715,17 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t8b, 11b\n\t"             \
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
-                       : "r" (addr), "i" (-EFAULT));
+                       : "r" (addr), "i" (-EFAULT));       \
+} while(0)
 #endif /* CONFIG_CPU_MIPSR6 */
 
-#define     StoreHW(addr, value, res) \
+#define     _StoreHW(addr, value, res, type) \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tnoat\n"                      \
-                       "1:\t"user_sb("%1", "0(%2)")"\n"    \
+                       "1:\t"type##_sb("%1", "0(%2)")"\n"  \
                        "srl\t$1,%1, 0x8\n"                 \
-                       "2:\t"user_sb("$1", "1(%2)")"\n"    \
+                       "2:\t"type##_sb("$1", "1(%2)")"\n"  \
                        ".set\tat\n\t"                      \
                        "li\t%0, 0\n"                       \
                        "3:\n\t"                            \
@@ -687,12 +739,15 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=r" (res)                        \
-                       : "r" (value), "r" (addr), "i" (-EFAULT));
+                       : "r" (value), "r" (addr), "i" (-EFAULT));\
+} while(0)
+
 #ifndef CONFIG_CPU_MIPSR6
-#define     StoreW(addr, value, res)  \
+#define     _StoreW(addr, value, res, type)  \
+do {                                                        \
                __asm__ __volatile__ (                      \
-                       "1:\t"user_swl("%1", "3(%2)")"\n"   \
-                       "2:\t"user_swr("%1", "(%2)")"\n\t"  \
+                       "1:\t"type##_swl("%1", "3(%2)")"\n" \
+                       "2:\t"type##_swr("%1", "(%2)")"\n\t"\
                        "li\t%0, 0\n"                       \
                        "3:\n\t"                            \
                        ".insn\n\t"                         \
@@ -705,9 +760,11 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                : "=r" (res)                                \
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               : "r" (value), "r" (addr), "i" (-EFAULT));  \
+} while(0)
 
-#define     StoreDW(addr, value, res) \
+#define     _StoreDW(addr, value, res) \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        "1:\tsdl\t%1, 7(%2)\n"              \
                        "2:\tsdr\t%1, (%2)\n\t"             \
@@ -723,20 +780,23 @@ extern void show_registers(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                : "=r" (res)                                \
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               : "r" (value), "r" (addr), "i" (-EFAULT));  \
+} while(0)
+
 #else
 /* MIPSR6 has no swl and sdl instructions */
-#define     StoreW(addr, value, res)  \
+#define     _StoreW(addr, value, res, type)  \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n\t"                    \
                        ".set\tnoat\n\t"                    \
-                       "1:"user_sb("%1", "0(%2)")"\n\t"    \
+                       "1:"type##_sb("%1", "0(%2)")"\n\t"  \
                        "srl\t$1, %1, 0x8\n\t"              \
-                       "2:"user_sb("$1", "1(%2)")"\n\t"    \
+                       "2:"type##_sb("$1", "1(%2)")"\n\t"  \
                        "srl\t$1, $1,  0x8\n\t"             \
-                       "3:"user_sb("$1", "2(%2)")"\n\t"    \
+                       "3:"type##_sb("$1", "2(%2)")"\n\t"  \
                        "srl\t$1, $1, 0x8\n\t"              \
-                       "4:"user_sb("$1", "3(%2)")"\n\t"    \
+                       "4:"type##_sb("$1", "3(%2)")"\n\t"  \
                        ".set\tpop\n\t"                     \
                        "li\t%0, 0\n"                       \
                        "10:\n\t"                           \
@@ -753,9 +813,11 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                : "=&r" (res)                               \
                : "r" (value), "r" (addr), "i" (-EFAULT)    \
-               : "memory");
+               : "memory");                                \
+} while(0)
 
-#define     StoreDW(addr, value, res) \
+#define     _StoreDW(addr, value, res) \
+do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n\t"                    \
                        ".set\tnoat\n\t"                    \
@@ -795,10 +857,28 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                : "=&r" (res)                               \
                : "r" (value), "r" (addr), "i" (-EFAULT)    \
-               : "memory");
+               : "memory");                                \
+} while(0)
+
 #endif /* CONFIG_CPU_MIPSR6 */
 #endif
 
+#define LoadHWU(addr, value, res)      _LoadHWU(addr, value, res, kernel)
+#define LoadHWUE(addr, value, res)     _LoadHWU(addr, value, res, user)
+#define LoadWU(addr, value, res)       _LoadWU(addr, value, res, kernel)
+#define LoadWUE(addr, value, res)      _LoadWU(addr, value, res, user)
+#define LoadHW(addr, value, res)       _LoadHW(addr, value, res, kernel)
+#define LoadHWE(addr, value, res)      _LoadHW(addr, value, res, user)
+#define LoadW(addr, value, res)                _LoadW(addr, value, res, kernel)
+#define LoadWE(addr, value, res)       _LoadW(addr, value, res, user)
+#define LoadDW(addr, value, res)       _LoadDW(addr, value, res)
+
+#define StoreHW(addr, value, res)      _StoreHW(addr, value, res, kernel)
+#define StoreHWE(addr, value, res)     _StoreHW(addr, value, res, user)
+#define StoreW(addr, value, res)       _StoreW(addr, value, res, kernel)
+#define StoreWE(addr, value, res)      _StoreW(addr, value, res, user)
+#define StoreDW(addr, value, res)      _StoreDW(addr, value, res)
+
 static void emulate_load_store_insn(struct pt_regs *regs,
        void __user *addr, unsigned int __user *pc)
 {
@@ -870,7 +950,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                                set_fs(seg);
                                goto sigbus;
                        }
-                       LoadHW(addr, value, res);
+                       LoadHWE(addr, value, res);
                        if (res) {
                                set_fs(seg);
                                goto fault;
@@ -883,7 +963,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                                set_fs(seg);
                                goto sigbus;
                        }
-                               LoadW(addr, value, res);
+                               LoadWE(addr, value, res);
                        if (res) {
                                set_fs(seg);
                                goto fault;
@@ -896,7 +976,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                                set_fs(seg);
                                goto sigbus;
                        }
-                       LoadHWU(addr, value, res);
+                       LoadHWUE(addr, value, res);
                        if (res) {
                                set_fs(seg);
                                goto fault;
@@ -911,7 +991,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                        }
                        compute_return_epc(regs);
                        value = regs->regs[insn.spec3_format.rt];
-                       StoreHW(addr, value, res);
+                       StoreHWE(addr, value, res);
                        if (res) {
                                set_fs(seg);
                                goto fault;
@@ -924,7 +1004,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                        }
                        compute_return_epc(regs);
                        value = regs->regs[insn.spec3_format.rt];
-                       StoreW(addr, value, res);
+                       StoreWE(addr, value, res);
                        if (res) {
                                set_fs(seg);
                                goto fault;
@@ -941,7 +1021,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 2))
                        goto sigbus;
 
-               LoadHW(addr, value, res);
+               if (config_enabled(CONFIG_EVA)) {
+                       if (segment_eq(get_fs(), get_ds()))
+                               LoadHW(addr, value, res);
+                       else
+                               LoadHWE(addr, value, res);
+               } else {
+                       LoadHW(addr, value, res);
+               }
+
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -952,7 +1040,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 4))
                        goto sigbus;
 
-               LoadW(addr, value, res);
+               if (config_enabled(CONFIG_EVA)) {
+                       if (segment_eq(get_fs(), get_ds()))
+                               LoadW(addr, value, res);
+                       else
+                               LoadWE(addr, value, res);
+               } else {
+                       LoadW(addr, value, res);
+               }
+
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -963,7 +1059,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 2))
                        goto sigbus;
 
-               LoadHWU(addr, value, res);
+               if (config_enabled(CONFIG_EVA)) {
+                       if (segment_eq(get_fs(), get_ds()))
+                               LoadHWU(addr, value, res);
+                       else
+                               LoadHWUE(addr, value, res);
+               } else {
+                       LoadHWU(addr, value, res);
+               }
+
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -1022,7 +1126,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 
                compute_return_epc(regs);
                value = regs->regs[insn.i_format.rt];
-               StoreHW(addr, value, res);
+
+               if (config_enabled(CONFIG_EVA)) {
+                       if (segment_eq(get_fs(), get_ds()))
+                               StoreHW(addr, value, res);
+                       else
+                               StoreHWE(addr, value, res);
+               } else {
+                       StoreHW(addr, value, res);
+               }
+
                if (res)
                        goto fault;
                break;
@@ -1033,7 +1146,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 
                compute_return_epc(regs);
                value = regs->regs[insn.i_format.rt];
-               StoreW(addr, value, res);
+
+               if (config_enabled(CONFIG_EVA)) {
+                       if (segment_eq(get_fs(), get_ds()))
+                               StoreW(addr, value, res);
+                       else
+                               StoreWE(addr, value, res);
+               } else {
+                       StoreW(addr, value, res);
+               }
+
                if (res)
                        goto fault;
                break;
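
The unaligned.c hunks above make two mechanical changes to the load/store emulation macros: every asm body is wrapped in do { ... } while (0) so a macro expands to a single statement, and each LoadHW/LoadW/StoreHW/... macro becomes a _LoadHW/_LoadW/_StoreHW implementation that takes an access-type argument, with kernel-space wrappers (LoadHW) and EVA user-space wrappers (LoadHWE) pasted on top through type##_lbu and friends. emulate_load_store_insn() then selects the *E variants at run time when CONFIG_EVA is enabled and the faulting access targets user addresses. A minimal host-side sketch of the same token-pasting dispatch follows; the helper names are invented, both helpers are plain byte loads here, and the byte order is only illustrative (in the kernel the user_* family emits EVA instructions):

    /* Sketch only: the _Load*(..., type) implementation plus thin wrappers. */
    #include <stdio.h>

    /* Stand-ins for the kernel_lbu()/user_lbu() assembler helpers. */
    #define kernel_lbu(dst, src) (*(dst) = *(src))
    #define user_lbu(dst, src)   (*(dst) = *(src))

    /* Implementation macro: 'type' picks the helper family at expansion time. */
    #define _LoadHW_demo(addr, value, type)                     \
    do {                                                        \
            unsigned char hi, lo;                               \
            type##_lbu(&hi, (const unsigned char *)(addr));     \
            type##_lbu(&lo, (const unsigned char *)(addr) + 1); \
            (value) = (short)((hi << 8) | lo);                  \
    } while (0)

    /* Wrappers mirroring LoadHW()/LoadHWE() in the patch. */
    #define LoadHW_demo(addr, value)  _LoadHW_demo(addr, value, kernel)
    #define LoadHWE_demo(addr, value) _LoadHW_demo(addr, value, user)

    int main(void)
    {
            unsigned char buf[3] = { 0x12, 0x34, 0x56 };
            short v;

            LoadHW_demo(buf + 1, v);   /* emulate an unaligned halfword load */
            printf("0x%04x\n", (unsigned)(unsigned short)v);
            return 0;
    }

The do/while(0) wrapper is what lets the macros sit as single statements inside the new if (config_enabled(CONFIG_EVA)) ... else ... dispatch blocks without dangling-else surprises.
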
index 21221edda7a9a7968023eabb0d190cbe8b612f29..0f75b6b3d2184b2cf0a85e59037aa6ad589b1f6d 100644 (file)
@@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pending)
 
 static struct irqaction cascade_irqaction = {
        .handler = no_action,
+       .flags = IRQF_NO_SUSPEND,
        .name = "cascade",
 };
 
index 7e3ea77668224ff3da82eed073721a203b1a0588..f7b91d3a371dd07e7c69c38290f1f0c77b5b3a0a 100644 (file)
@@ -119,36 +119,25 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 EXPORT_SYMBOL(__flush_anon_page);
 
-static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
+void __update_cache(struct vm_area_struct *vma, unsigned long address,
+       pte_t pte)
 {
        struct page *page;
-       unsigned long pfn = pte_pfn(pteval);
+       unsigned long pfn, addr;
+       int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
 
+       pfn = pte_pfn(pte);
        if (unlikely(!pfn_valid(pfn)))
                return;
-
        page = pfn_to_page(pfn);
        if (page_mapping(page) && Page_dcache_dirty(page)) {
-               unsigned long page_addr = (unsigned long) page_address(page);
-
-               if (!cpu_has_ic_fills_f_dc ||
-                   pages_do_alias(page_addr, address & PAGE_MASK))
-                       flush_data_cache_page(page_addr);
+               addr = (unsigned long) page_address(page);
+               if (exec || pages_do_alias(addr, address & PAGE_MASK))
+                       flush_data_cache_page(addr);
                ClearPageDcacheDirty(page);
        }
 }
 
-void set_pte_at(struct mm_struct *mm, unsigned long addr,
-        pte_t *ptep, pte_t pteval)
-{
-        if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
-                if (pte_present(pteval))
-                        mips_flush_dcache_from_pte(pteval, addr);
-        }
-
-        set_pte(ptep, pteval);
-}
-
 unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);
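
The mm/cache.c change above drops the MIPS-private set_pte_at() override and moves the dcache maintenance into __update_cache(), driven from the generic update path: a dirty dcache page is now also written back when the mapping is executable on CPUs whose icache does not fill from the dcache, not only when the kernel and user virtual addresses may alias. The decision the new code encodes, as a stand-alone C predicate (the alias mask is invented; the real one depends on cache geometry):

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_PAGE_MASK  (~0xFFFUL)
    #define DEMO_ALIAS_MASK 0x3000UL   /* invented: 16 KiB way size, 4 KiB pages */

    static bool pages_do_alias_demo(unsigned long a, unsigned long b)
    {
            return ((a ^ b) & DEMO_ALIAS_MASK) != 0;
    }

    static bool must_flush_dcache(bool vma_exec, bool ic_fills_from_dc,
                                  unsigned long kaddr, unsigned long uaddr)
    {
            bool exec = vma_exec && !ic_fills_from_dc;

            return exec || pages_do_alias_demo(kaddr, uaddr & DEMO_PAGE_MASK);
    }

    int main(void)
    {
            /* Executable mapping, icache does not snoop the dcache: flush. */
            printf("%d\n", must_flush_dcache(true, false, 0x80001000UL, 0x00400000UL));
            /* Non-executable, no cache colour conflict: no flush needed.   */
            printf("%d\n", must_flush_dcache(false, false, 0x80001000UL, 0x00401000UL));
            return 0;
    }
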
 
index 448cde372af016d5af38110876fb9cc36b644b0f..faa5c9822eccf48bc433f886a9939d93a1112bf1 100644 (file)
@@ -96,7 +96,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, prot);
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
-       entrylo = pte.pte_high;
+       entrylo = pte_to_entrylo(pte.pte_high);
 #else
        entrylo = pte_to_entrylo(pte_val(pte));
 #endif
@@ -106,6 +106,11 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
+#ifdef CONFIG_XPA
+       entrylo = (pte.pte_low & _PFNX_MASK);
+       writex_c0_entrylo0(entrylo);
+       writex_c0_entrylo1(entrylo);
+#endif
        tlbidx = read_c0_wired();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
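
With CONFIG_XPA (Extended Physical Addressing), this hunk and the __update_tlb() hunk in tlb-r4k.c that follows write a TLB entry in two halves: pte_high goes through pte_to_entrylo() into the normal EntryLo register, and the low-order PFNX bits of pte_low are written into the extension half via writex_c0_entrylo*() (mthc0 in the later uasm hunk). A purely illustrative split, with an invented value standing in for _PFNX_MASK and pte_to_entrylo() elided:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PFNX_MASK 0x00ffffffu   /* stand-in for _PFNX_MASK */

    struct demo_pte {
            uint32_t pte_low;
            uint32_t pte_high;
    };

    struct demo_entrylo {
            uint32_t lo;    /* value for write_c0_entrylo*()  */
            uint32_t ext;   /* value for writex_c0_entrylo*() */
    };

    static struct demo_entrylo demo_pte_to_entrylo(struct demo_pte pte)
    {
            struct demo_entrylo e;

            e.lo  = pte.pte_high;                   /* main EntryLo bits   */
            e.ext = pte.pte_low & DEMO_PFNX_MASK;   /* extra physical bits */
            return e;
    }

    int main(void)
    {
            struct demo_pte pte = { .pte_low = 0x00000005u, .pte_high = 0x12345678u };
            struct demo_entrylo e = demo_pte_to_entrylo(pte);

            printf("entrylo=0x%08x ext=0x%08x\n", (unsigned)e.lo, (unsigned)e.ext);
            return 0;
    }
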
index 37ad381c3e5f0002452f59bc67ef054806a1f390..a27a088e6f9f830f7e3445abab6bae5cd8b60478 100644 (file)
@@ -333,9 +333,17 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
                ptep = pte_offset_map(pmdp, address);
 
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#ifdef CONFIG_XPA
+               write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
+               writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+               ptep++;
+               write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
+               writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+#else
                write_c0_entrylo0(ptep->pte_high);
                ptep++;
                write_c0_entrylo1(ptep->pte_high);
+#endif
 #else
                write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
                write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
@@ -355,6 +363,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                     unsigned long entryhi, unsigned long pagemask)
 {
+#ifdef CONFIG_XPA
+       panic("Broken for XPA kernels");
+#else
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
@@ -383,6 +394,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        local_irq_restore(flags);
+#endif
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
index 7c7469f56ec30211d243b8b431107f36b2dc11ca..97c87027c17f8bdceedb688e1df4b0e5a5f13eb9 100644 (file)
 #include <asm/uasm.h>
 #include <asm/setup.h>
 
+static int __cpuinitdata mips_xpa_disabled;
+
+static int __init xpa_disable(char *s)
+{
+       mips_xpa_disabled = 1;
+
+       return 1;
+}
+
+__setup("noxpa", xpa_disable);
+
 /*
  * TLB load/store/modify handlers.
  *
@@ -231,14 +242,14 @@ static void output_pgtable_bits_defines(void)
        pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
        pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
 #endif
+#ifdef CONFIG_CPU_MIPSR2
        if (cpu_has_rixi) {
 #ifdef _PAGE_NO_EXEC_SHIFT
                pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
-#endif
-#ifdef _PAGE_NO_READ_SHIFT
                pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
 #endif
        }
+#endif
        pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
        pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
        pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
@@ -501,26 +512,9 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
        case tlb_indexed: tlbw = uasm_i_tlbwi; break;
        }
 
-       if (cpu_has_mips_r2_exec_hazard) {
-               /*
-                * The architecture spec says an ehb is required here,
-                * but a number of cores do not have the hazard and
-                * using an ehb causes an expensive pipeline stall.
-                */
-               switch (current_cpu_type()) {
-               case CPU_M14KC:
-               case CPU_74K:
-               case CPU_1074K:
-               case CPU_PROAPTIV:
-               case CPU_P5600:
-               case CPU_M5150:
-               case CPU_QEMU_GENERIC:
-                       break;
-
-               default:
+       if (cpu_has_mips_r2_r6) {
+               if (cpu_has_mips_r2_exec_hazard)
                        uasm_i_ehb(p);
-                       break;
-               }
                tlbw(p);
                return;
        }
@@ -1028,12 +1022,27 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
        } else {
                int pte_off_even = sizeof(pte_t) / 2;
                int pte_off_odd = pte_off_even + sizeof(pte_t);
+#ifdef CONFIG_XPA
+               const int scratch = 1; /* Our extra working register */
 
-               /* The pte entries are pre-shifted */
-               uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
-               UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-               uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
-               UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
+               uasm_i_addu(p, scratch, 0, ptep);
+#endif
+               uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
+               uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
+               UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+               UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
+               UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
+               UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
+#ifdef CONFIG_XPA
+               uasm_i_lw(p, tmp, 0, scratch);
+               uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
+               uasm_i_lui(p, scratch, 0xff);
+               uasm_i_ori(p, scratch, scratch, 0xffff);
+               uasm_i_and(p, tmp, scratch, tmp);
+               uasm_i_and(p, ptep, scratch, ptep);
+               uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+               uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
+#endif
        }
 #else
        UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
@@ -1534,8 +1543,14 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
        unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
-#endif
 
+       if (!cpu_has_64bits) {
+               const int scratch = 1; /* Our extra working register */
+
+               uasm_i_lui(p, scratch, (mode >> 16));
+               uasm_i_or(p, pte, pte, scratch);
+       } else
+#endif
        uasm_i_ori(p, pte, pte, mode);
 #ifdef CONFIG_SMP
 # ifdef CONFIG_PHYS_ADDR_T_64BIT
@@ -1599,15 +1614,17 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
                        uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
                        uasm_i_nop(p);
                } else {
-                       uasm_i_andi(p, t, pte, _PAGE_PRESENT);
+                       uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+                       uasm_i_andi(p, t, t, 1);
                        uasm_il_beqz(p, r, t, lid);
                        if (pte == t)
                                /* You lose the SMP race :-(*/
                                iPTE_LW(p, pte, ptr);
                }
        } else {
-               uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
-               uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
+               uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+               uasm_i_andi(p, t, t, 3);
+               uasm_i_xori(p, t, t, 3);
                uasm_il_bnez(p, r, t, lid);
                if (pte == t)
                        /* You lose the SMP race :-(*/
@@ -1636,8 +1653,9 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
 {
        int t = scratch >= 0 ? scratch : pte;
 
-       uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
-       uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
+       uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+       uasm_i_andi(p, t, t, 5);
+       uasm_i_xori(p, t, t, 5);
        uasm_il_bnez(p, r, t, lid);
        if (pte == t)
                /* You lose the SMP race :-(*/
@@ -1673,7 +1691,8 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
                uasm_i_nop(p);
        } else {
                int t = scratch >= 0 ? scratch : pte;
-               uasm_i_andi(p, t, pte, _PAGE_WRITE);
+               uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
+               uasm_i_andi(p, t, t, 1);
                uasm_il_beqz(p, r, t, lid);
                if (pte == t)
                        /* You lose the SMP race :-(*/
@@ -2286,6 +2305,11 @@ static void config_htw_params(void)
 
        pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
        pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
+
+       /* If XPA has been enabled, PTEs are 64-bit in size. */
+       if (read_c0_pagegrain() & PG_ELPA)
+               pwsize |= 1;
+
        write_c0_pwsize(pwsize);
 
        /* Make sure everything is set before we enable the HTW */
@@ -2299,6 +2323,28 @@ static void config_htw_params(void)
        print_htw_config();
 }
 
+static void config_xpa_params(void)
+{
+#ifdef CONFIG_XPA
+       unsigned int pagegrain;
+
+       if (mips_xpa_disabled) {
+               pr_info("Extended Physical Addressing (XPA) disabled\n");
+               return;
+       }
+
+       pagegrain = read_c0_pagegrain();
+       write_c0_pagegrain(pagegrain | PG_ELPA);
+       back_to_back_c0_hazard();
+       pagegrain = read_c0_pagegrain();
+
+       if (pagegrain & PG_ELPA)
+               pr_info("Extended Physical Addressing (XPA) enabled\n");
+       else
+               panic("Extended Physical Addressing (XPA) disabled");
+#endif
+}
+
 void build_tlb_refill_handler(void)
 {
        /*
@@ -2363,8 +2409,9 @@ void build_tlb_refill_handler(void)
                }
                if (cpu_has_local_ebase)
                        build_r4000_tlb_refill_handler();
+               if (cpu_has_xpa)
+                       config_xpa_params();
                if (cpu_has_htw)
                        config_htw_params();
-
        }
 }
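
Besides wiring up the "noxpa" parameter, config_xpa_params() and the cpu_has_xpa gate in build_tlb_refill_handler(), the tlbex.c hunks change how the handlers test PTE software flags: instead of andi against _PAGE_PRESENT/_PAGE_READ/_PAGE_WRITE masks, the flags are shifted down by _PAGE_PRESENT_SHIFT (or _PAGE_WRITE_SHIFT) and compared against small constants, which stays correct however far up those bits sit in the wider XPA PTE layout. The two-bit present-and-readable check (srl; andi 3; xori 3; bnez to the fault label) is equivalent to the C predicate below; the single-bit variants work the same way with andi 1. The shift value here is illustrative:

    #include <assert.h>
    #include <stdint.h>

    #define DEMO_PRESENT_SHIFT 24                         /* illustrative       */
    #define DEMO_PAGE_PRESENT  (1u << DEMO_PRESENT_SHIFT)
    #define DEMO_PAGE_READ     (1u << (DEMO_PRESENT_SHIFT + 1))

    static int pte_faults(uint32_t pte)
    {
            uint32_t t = (pte >> DEMO_PRESENT_SHIFT) & 3; /* srl + andi t,t,3   */
            return (t ^ 3) != 0;                          /* xori t,t,3 ; bnez  */
    }

    int main(void)
    {
            assert(!pte_faults(DEMO_PAGE_PRESENT | DEMO_PAGE_READ));
            assert(pte_faults(DEMO_PAGE_PRESENT));        /* present, not readable */
            assert(pte_faults(0));                        /* not present           */
            return 0;
    }
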
index 32c27cb8e463af5dce20988a3552ff0158e5b5b1..b769657be4d4609f9e4545ac97147ca63000a363 100644 (file)
@@ -54,6 +54,12 @@ fw_memblock_t * __init fw_getmdesc(int eva)
                pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
                physical_memsize = 0x02000000;
        } else {
+               if (memsize > (256 << 20)) { /* memsize should be capped to 256M */
+                       pr_warn("Unsupported memsize value (0x%lx) detected! "
+                               "Using 0x10000000 (256M) instead\n",
+                               memsize);
+                       memsize = 256 << 20;
+               }
                /* If ememsize is set, then set physical_memsize to that */
                physical_memsize = ememsize ? : memsize;
        }
index c83dbf3689e2b9d0453ccca28dceffcbe1c3f798..7b066a44e6798e71447e55633ff1cb8334a2948d 100644 (file)
@@ -203,6 +203,7 @@ static u8 read_phy_reg(u64 regbase, u32 addr, u32 physel)
 static void config_sata_phy(u64 regbase)
 {
        u32 port, i, reg;
+       u8 val;
 
        for (port = 0; port < 2; port++) {
                for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++)
@@ -210,6 +211,18 @@ static void config_sata_phy(u64 regbase)
 
                for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++)
                        write_phy_reg(regbase, reg, port, sata_phy_config2[i]);
+
+               /* Fix for PHY link up failures at lower temperatures */
+               write_phy_reg(regbase, 0x800F, port, 0x1f);
+
+               val = read_phy_reg(regbase, 0x0029, port);
+               write_phy_reg(regbase, 0x0029, port, val | (0x7 << 1));
+
+               val = read_phy_reg(regbase, 0x0056, port);
+               write_phy_reg(regbase, 0x0056, port, val & ~(1 << 3));
+
+               val = read_phy_reg(regbase, 0x0018, port);
+               write_phy_reg(regbase, 0x0018, port, val & ~(0x7 << 0));
        }
 }
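
The ahci-init-xlp2.c fix above is one absolute write plus three read-modify-write cycles on vendor-specific SATA PHY registers. A compact sketch of that pattern with stubbed accessors; the register numbers and bit fields are taken from the hunk, while the stubs and the rmw helper are not part of the driver:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t phy_regs[0x10000];   /* fake register file for the demo */

    static uint8_t read_phy_reg(uint64_t regbase, uint32_t addr, uint32_t port)
    {
            (void)regbase; (void)port;
            return phy_regs[addr & 0xffff];
    }

    static void write_phy_reg(uint64_t regbase, uint32_t addr, uint32_t port, uint8_t val)
    {
            (void)regbase; (void)port;
            phy_regs[addr & 0xffff] = val;
    }

    static void rmw_phy_reg(uint64_t regbase, uint32_t addr, uint32_t port,
                            uint8_t clear, uint8_t set)
    {
            uint8_t val = read_phy_reg(regbase, addr, port);

            write_phy_reg(regbase, addr, port, (uint8_t)((val & ~clear) | set));
    }

    int main(void)
    {
            write_phy_reg(0, 0x800F, 0, 0x1f);            /* absolute write   */
            rmw_phy_reg(0, 0x0029, 0, 0, 0x7 << 1);       /* set bits [3:1]   */
            rmw_phy_reg(0, 0x0056, 0, 1 << 3, 0);         /* clear bit 3      */
            rmw_phy_reg(0, 0x0018, 0, 0x7 << 0, 0);       /* clear bits [2:0] */
            printf("0x0029 = 0x%02x\n", read_phy_reg(0, 0x0029, 0));
            return 0;
    }
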
 
index 300591c6278daa07a67bf0c20ed2d223552ea20a..2eda01e6e08fd38d5e572f5e66283536d9e9cd34 100644 (file)
@@ -43,7 +43,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80)  += pci-bcm1480.o pci-bcm1480ht.o
 obj-$(CONFIG_SNI_RM)           += fixup-sni.o ops-sni.o
 obj-$(CONFIG_LANTIQ)           += fixup-lantiq.o
 obj-$(CONFIG_PCI_LANTIQ)       += pci-lantiq.o ops-lantiq.o
-obj-$(CONFIG_SOC_RT2880)       += pci-rt2880.o
+obj-$(CONFIG_SOC_RT288X)       += pci-rt2880.o
 obj-$(CONFIG_SOC_RT3883)       += pci-rt3883.o
 obj-$(CONFIG_TANBAC_TB0219)    += fixup-tb0219.o
 obj-$(CONFIG_TANBAC_TB0226)    += fixup-tb0226.o
index a04af55d89f10a55593b2b16f53f20c7c16ee95d..c258cd406fbbe39b2e657b60e5e43545526d9a4e 100644 (file)
@@ -214,6 +214,8 @@ const char *octeon_get_pci_interrupts(void)
                return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
        case CVMX_BOARD_TYPE_BBGW_REF:
                return "AABCD";
+       case CVMX_BOARD_TYPE_CUST_DSR1000N:
+               return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
        case CVMX_BOARD_TYPE_THUNDER:
        case CVMX_BOARD_TYPE_EBH3000:
        default:
@@ -271,9 +273,6 @@ static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
        pci_addr.s.func = devfn & 0x7;
        pci_addr.s.reg = reg;
 
-#if PCI_CONFIG_SPACE_DELAY
-       udelay(PCI_CONFIG_SPACE_DELAY);
-#endif
        switch (size) {
        case 4:
                *val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
@@ -308,9 +307,6 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
        pci_addr.s.func = devfn & 0x7;
        pci_addr.s.reg = reg;
 
-#if PCI_CONFIG_SPACE_DELAY
-       udelay(PCI_CONFIG_SPACE_DELAY);
-#endif
        switch (size) {
        case 4:
                cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
index 1bb0b2bf8d6ea1e411266fd3d8de6a5afc50fba0..99f3db4f0a9b1b060476e6d5873fd45dd15a649b 100644 (file)
@@ -1762,14 +1762,6 @@ static int octeon_pcie_write_config(unsigned int pcie_port, struct pci_bus *bus,
        default:
                return PCIBIOS_FUNC_NOT_SUPPORTED;
        }
-#if PCI_CONFIG_SPACE_DELAY
-       /*
-        * Delay on writes so that devices have time to come up. Some
-        * bridges need this to allow time for the secondary busses to
-        * work
-        */
-       udelay(PCI_CONFIG_SPACE_DELAY);
-#endif
        return PCIBIOS_SUCCESSFUL;
 }
 
index b1c52ca580f983bdc64d185fbd65615f108395a3..e9bc8c96174e227a8d9cf730bcc4bfb0ff0fb1cf 100644 (file)
@@ -7,6 +7,11 @@ config CLKEVT_RT3352
        select CLKSRC_OF
        select CLKSRC_MMIO
 
+config RALINK_ILL_ACC
+       bool
+       depends on SOC_RT305X
+       default y
+
 choice
        prompt "Ralink SoC selection"
        default SOC_RT305X
index f213f5b4c4239b961e260c659447886a8646f0bd..d17437238a2cef75dd4d46593c760320933b6d38 100644 (file)
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
        if (likely(pgd != NULL)) {
                memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
                actual_pgd += PTRS_PER_PGD;
                /* Populate first pmd with allocated memory.  We mark it
                 * with PxD_FLAG_ATTACHED as a signal to the system that this
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
        pgd -= PTRS_PER_PGD;
 #endif
        free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-#ifdef CONFIG_64BIT
        if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-               /* This is the permanent pmd attached to the pgd;
-                * cannot free it */
+               /*
+                * This is the permanent pmd attached to the pgd;
+                * cannot free it.
+                * Increment the counter to compensate for the decrement
+                * done by generic mm code.
+                */
+               mm_inc_nr_pmds(mm);
                return;
-#endif
        free_pages((unsigned long)pmd, PMD_ORDER);
 }
 
@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
        /* preserve the gateway marker if this is the beginning of
         * the permanent pmd */
        if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
index 5a8997d63899346a82e759f30b601eaf907bac37..8eefb12d1d33f3fc87195a0cd9efe0d0fd0301b2 100644 (file)
@@ -55,8 +55,8 @@
 #define ENTRY_COMP(_name_) .word sys_##_name_
 #endif
 
-       ENTRY_SAME(restart_syscall)     /* 0 */
-       ENTRY_SAME(exit)
+90:    ENTRY_SAME(restart_syscall)     /* 0 */
+91:    ENTRY_SAME(exit)
        ENTRY_SAME(fork_wrapper)
        ENTRY_SAME(read)
        ENTRY_SAME(write)
        ENTRY_SAME(bpf)
        ENTRY_COMP(execveat)
 
-       /* Nothing yet */
+
+.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
+.error "size of syscall table does not fit value of __NR_Linux_syscalls"
+.endif
 
 #undef ENTRY_SAME
 #undef ENTRY_DIFF
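
The added .ifne block turns a silently mis-sized table into a build failure: the new labels 90 and 91 bracket exactly one entry, so (91b - 90b) is the per-entry size and the expression checks that the distance from label 90 to the end of the table equals __NR_Linux_syscalls entries. The same idea expressed in C as a static assertion on an array length (an analogue only; the real table is assembled, not compiled):

    #include <assert.h>

    #define DEMO_NR_SYSCALLS 4

    void *demo_table[] = { 0, 0, 0, 0 };

    static_assert(sizeof(demo_table) / sizeof(demo_table[0]) == DEMO_NR_SYSCALLS,
                  "size of syscall table does not fit DEMO_NR_SYSCALLS");

    int main(void)
    {
            return 0;
    }
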
index 2bf8e9307be9833b5bf9af51defdfada8feae68d..4c8ad592ae3351d96f7e225411d9f551657c5bc7 100644 (file)
@@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
 
 static inline int cpu_nr_cores(void)
 {
-       return NR_CPUS >> threads_shift;
+       return nr_cpu_ids >> threads_shift;
 }
 
 static inline cpumask_t cpu_online_cores_map(void)
index 03cd858a401c067b1b34f18e9b5fddd521e8fe2d..4cbe23af400ab622749b31f980d5d72d6b8587ac 100644 (file)
 #define PPC_INST_MFSPR_PVR_MASK                0xfc1fffff
 #define PPC_INST_MFTMR                 0x7c0002dc
 #define PPC_INST_MSGSND                        0x7c00019c
+#define PPC_INST_MSGCLR                        0x7c0001dc
 #define PPC_INST_MSGSNDP               0x7c00011c
 #define PPC_INST_MTTMR                 0x7c0003dc
 #define PPC_INST_NOP                   0x60000000
                                        ___PPC_RB(b) | __PPC_EH(eh))
 #define PPC_MSGSND(b)          stringify_in_c(.long PPC_INST_MSGSND | \
                                        ___PPC_RB(b))
+#define PPC_MSGCLR(b)          stringify_in_c(.long PPC_INST_MSGCLR | \
+                                       ___PPC_RB(b))
 #define PPC_MSGSNDP(b)         stringify_in_c(.long PPC_INST_MSGSNDP | \
                                        ___PPC_RB(b))
 #define PPC_POPCNTB(a, s)      stringify_in_c(.long PPC_INST_POPCNTB | \
index 1c874fb533bbf22fe8ff019b328ae623214243bb..af56b5c6c81ab18aefe24b16c810a3b89604434d 100644 (file)
 #define   SRR1_ISI_N_OR_G      0x10000000 /* ISI: Access is no-exec or G */
 #define   SRR1_ISI_PROT                0x08000000 /* ISI: Other protection fault */
 #define   SRR1_WAKEMASK                0x00380000 /* reason for wakeup */
+#define   SRR1_WAKEMASK_P8     0x003c0000 /* reason for wakeup on POWER8 */
 #define   SRR1_WAKESYSERR      0x00300000 /* System error */
 #define   SRR1_WAKEEE          0x00200000 /* External interrupt */
 #define   SRR1_WAKEMT          0x00280000 /* mtctrl */
 #define          SRR1_WAKEHMI          0x00280000 /* Hypervisor maintenance */
 #define   SRR1_WAKEDEC         0x00180000 /* Decrementer interrupt */
+#define   SRR1_WAKEDBELL       0x00140000 /* Privileged doorbell on P8 */
 #define   SRR1_WAKETHERM       0x00100000 /* Thermal management interrupt */
 #define          SRR1_WAKERESET        0x00100000 /* System reset */
+#define   SRR1_WAKEHDBELL      0x000c0000 /* Hypervisor doorbell on P8 */
 #define          SRR1_WAKESTATE        0x00030000 /* Powersave exit mask [46:47] */
 #define          SRR1_WS_DEEPEST       0x00030000 /* Some resources not maintained,
                                          * may not be recoverable */
index f337666768a76594b41b59f283efa3b891988193..f8304687833651975722e4f8aa51741fffad63d9 100644 (file)
@@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .machine_check_early    = __machine_check_early_realmode_p8,
                .platform               = "power8",
        },
+       {       /* Power8NVL */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x004c0000,
+               .cpu_name               = "POWER8NVL (raw)",
+               .cpu_features           = CPU_FTRS_POWER8,
+               .cpu_user_features      = COMMON_USER_POWER8,
+               .cpu_user_features2     = COMMON_USER2_POWER8,
+               .mmu_features           = MMU_FTRS_POWER8,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 6,
+               .pmc_type               = PPC_PMC_IBM,
+               .oprofile_cpu_type      = "ppc64/power8",
+               .oprofile_type          = PPC_OPROFILE_INVALID,
+               .cpu_setup              = __setup_cpu_power8,
+               .cpu_restore            = __restore_cpu_power8,
+               .flush_tlb              = __flush_tlb_power8,
+               .machine_check_early    = __machine_check_early_realmode_p8,
+               .platform               = "power8",
+       },
        {       /* Power8 DD1: Does not support doorbell IPIs */
                .pvr_mask               = 0xffffff00,
                .pvr_value              = 0x004d0100,
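
The new cpu_specs[] entry recognises POWER8NVL parts by their processor version register: an entry applies when (pvr & pvr_mask) == pvr_value, and the first matching entry in the table is used. A sketch of that lookup with illustrative table contents (names and values here are for the demo, not the kernel's full table):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo_cpu_spec {
            uint32_t pvr_mask;
            uint32_t pvr_value;
            const char *cpu_name;
    };

    static const struct demo_cpu_spec demo_specs[] = {
            { 0xffff0000, 0x004c0000, "POWER8NVL (raw)" },
            { 0xffffff00, 0x004d0100, "POWER8 DD1"      },
            { 0x00000000, 0x00000000, "unknown"         },   /* catch-all */
    };

    static const struct demo_cpu_spec *demo_identify_cpu(uint32_t pvr)
    {
            size_t i;

            for (i = 0; i < sizeof(demo_specs) / sizeof(demo_specs[0]); i++)
                    if ((pvr & demo_specs[i].pvr_mask) == demo_specs[i].pvr_value)
                            return &demo_specs[i];
            return NULL;
    }

    int main(void)
    {
            const struct demo_cpu_spec *s = demo_identify_cpu(0x004c1200);

            printf("%s\n", s ? s->cpu_name : "none");
            return 0;
    }
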
index f4217819cc31fd417243d17f079c8bf2471d37a5..2128f3a96c32dd743908f286d4cf72d6d7701568 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <asm/dbell.h>
 #include <asm/irq_regs.h>
+#include <asm/kvm_ppc.h>
 
 #ifdef CONFIG_SMP
 void doorbell_setup_this_cpu(void)
@@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs)
 
        may_hard_irq_enable();
 
+       kvmppc_set_host_ipi(smp_processor_id(), 0);
        __this_cpu_inc(irq_stat.doorbell_irqs);
 
        smp_ipi_demux();
index c2df8150bd7a0425fc00ebcc78d784d6b280c146..9519e6bdc6d75c324334bf4f0f52dd6da9d9bbcc 100644 (file)
@@ -1408,7 +1408,7 @@ machine_check_handle_early:
        bne     9f                      /* continue in V mode if we are. */
 
 5:
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
        /*
         * We are coming from kernel context. Check if we are coming from
         * guest. if yes, then we can continue. We will fall through
index de4018a1bc4bd290ecd0e970bd51960b281497c3..de747563d29df39be580e6cd67c54769194dce42 100644 (file)
@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
        spin_lock(&vcpu->arch.vpa_update_lock);
        lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
        if (lppaca)
-               yield_count = lppaca->yield_count;
+               yield_count = be32_to_cpu(lppaca->yield_count);
        spin_unlock(&vcpu->arch.vpa_update_lock);
        return yield_count;
 }
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
                bool preserve_top32)
 {
+       struct kvm *kvm = vcpu->kvm;
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
        u64 mask;
 
+       mutex_lock(&kvm->lock);
        spin_lock(&vc->lock);
        /*
         * If ILE (interrupt little-endian) has changed, update the
         * MSR_LE bit in the intr_msr for each vcpu in this vcore.
         */
        if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
-               struct kvm *kvm = vcpu->kvm;
                struct kvm_vcpu *vcpu;
                int i;
 
-               mutex_lock(&kvm->lock);
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (vcpu->arch.vcore != vc)
                                continue;
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
                        else
                                vcpu->arch.intr_msr &= ~MSR_LE;
                }
-               mutex_unlock(&kvm->lock);
        }
 
        /*
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
                mask &= 0xFFFFFFFF;
        vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
        spin_unlock(&vc->lock);
+       mutex_unlock(&kvm->lock);
 }
 
 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
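
Two independent fixes sit in the book3s_hv.c hunks above: the yield count read from the lppaca is big-endian and must pass through be32_to_cpu() before use, and kvmppc_set_lpcr() now takes kvm->lock (a sleeping mutex) before the vcore spinlock for the whole update instead of acquiring it in the middle while the spinlock is already held. A toy user-space rendering of the corrected lock ordering, with pthread mutexes standing in for both kernel locks:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;  /* mutex in the kernel    */
    static pthread_mutex_t vc_lock  = PTHREAD_MUTEX_INITIALIZER;  /* spinlock in the kernel */
    static unsigned long vc_lpcr;

    static void demo_set_lpcr(unsigned long new_lpcr, unsigned long mask)
    {
            pthread_mutex_lock(&kvm_lock);            /* outer lock first         */
            pthread_mutex_lock(&vc_lock);
            vc_lpcr = (vc_lpcr & ~mask) | (new_lpcr & mask);
            pthread_mutex_unlock(&vc_lock);
            pthread_mutex_unlock(&kvm_lock);          /* release in reverse order */
    }

    int main(void)
    {
            demo_set_lpcr(0x1234, 0xffff);
            printf("lpcr = 0x%lx\n", vc_lpcr);
            return 0;
    }
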
index bb94e6f20c813bbcdb8297d9fb860a538c5e4f08..6cbf1630cb70c9d8b306a7628c91a3c9acf35785 100644 (file)
@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        /* Save HEIR (HV emulation assist reg) in emul_inst
           if this is an HEI (HV emulation interrupt, e40) */
        li      r3,KVM_INST_FETCH_FAILED
+       stw     r3,VCPU_LAST_INST(r9)
        cmpwi   r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
        bne     11f
        mfspr   r3,SPRN_HEIR
index fc34025ef82270b00c7c32811dca20552a61d2fe..38a45088f633bb190ff53b5f7ac8306fd24acb4e 100644 (file)
@@ -33,6 +33,8 @@
 #include <asm/runlatch.h>
 #include <asm/code-patching.h>
 #include <asm/dbell.h>
+#include <asm/kvm_ppc.h>
+#include <asm/ppc-opcode.h>
 
 #include "powernv.h"
 
@@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void)
 static void pnv_smp_cpu_kill_self(void)
 {
        unsigned int cpu;
-       unsigned long srr1;
+       unsigned long srr1, wmask;
        u32 idle_states;
 
        /* Standard hot unplug procedure */
@@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void)
        generic_set_cpu_dead(cpu);
        smp_wmb();
 
+       wmask = SRR1_WAKEMASK;
+       if (cpu_has_feature(CPU_FTR_ARCH_207S))
+               wmask = SRR1_WAKEMASK_P8;
+
        idle_states = pnv_get_supported_cpuidle_states();
        /* We don't want to take decrementer interrupts while we are offline,
         * so clear LPCR:PECE1. We keep PECE2 enabled.
@@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void)
                 * having finished executing in a KVM guest, then srr1
                 * contains 0.
                 */
-               if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) {
+               if ((srr1 & wmask) == SRR1_WAKEEE) {
                        icp_native_flush_interrupt();
                        local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
                        smp_mb();
+               } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
+                       unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+                       asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
+                       kvmppc_set_host_ipi(cpu, 0);
                }
 
                if (cpu_core_split_required())
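
This powernv/smp.c hunk pairs with the SRR1_WAKEMASK_P8 and doorbell wake-reason definitions added to reg.h earlier in the diff: the wake-reason field in SRR1 is wider on POWER8, so the mask is chosen per CPU before comparing, and a hypervisor-doorbell wakeup is acknowledged with msgclr and by clearing the host IPI flag. The decode step on its own, in plain C with the constants copied from the reg.h hunk:

    #include <stdio.h>

    #define SRR1_WAKEMASK     0x00380000UL
    #define SRR1_WAKEMASK_P8  0x003c0000UL
    #define SRR1_WAKEEE       0x00200000UL
    #define SRR1_WAKEHDBELL   0x000c0000UL

    static const char *demo_wake_reason(unsigned long srr1, int is_power8)
    {
            unsigned long wmask = is_power8 ? SRR1_WAKEMASK_P8 : SRR1_WAKEMASK;

            if ((srr1 & wmask) == SRR1_WAKEEE)
                    return "external interrupt";
            if (is_power8 && (srr1 & wmask) == SRR1_WAKEHDBELL)
                    return "hypervisor doorbell";
            return "other";
    }

    int main(void)
    {
            printf("%s\n", demo_wake_reason(0x000c0000UL, 1));
            return 0;
    }
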
index 90cf3dcbd9f268b5430a1cb69bb44cd0fd75d54a..8f35d525cede8327ac73c2129810d83456d35c15 100644 (file)
 static struct kobject *mobility_kobj;
 
 struct update_props_workarea {
-       u32 phandle;
-       u32 state;
-       u64 reserved;
-       u32 nprops;
+       __be32 phandle;
+       __be32 state;
+       __be64 reserved;
+       __be32 nprops;
 } __packed;
 
 #define NODE_ACTION_MASK       0xff000000
@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
        return rc;
 }
 
-static int delete_dt_node(u32 phandle)
+static int delete_dt_node(__be32 phandle)
 {
        struct device_node *dn;
 
-       dn = of_find_node_by_phandle(phandle);
+       dn = of_find_node_by_phandle(be32_to_cpu(phandle));
        if (!dn)
                return -ENOENT;
 
@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
        return 0;
 }
 
-static int update_dt_node(u32 phandle, s32 scope)
+static int update_dt_node(__be32 phandle, s32 scope)
 {
        struct update_props_workarea *upwa;
        struct device_node *dn;
@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
        char *prop_data;
        char *rtas_buf;
        int update_properties_token;
+       u32 nprops;
        u32 vd;
 
        update_properties_token = rtas_token("ibm,update-properties");
@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
        if (!rtas_buf)
                return -ENOMEM;
 
-       dn = of_find_node_by_phandle(phandle);
+       dn = of_find_node_by_phandle(be32_to_cpu(phandle));
        if (!dn) {
                kfree(rtas_buf);
                return -ENOENT;
@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
                        break;
 
                prop_data = rtas_buf + sizeof(*upwa);
+               nprops = be32_to_cpu(upwa->nprops);
 
                /* On the first call to ibm,update-properties for a node the
                 * the first property value descriptor contains an empty
@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
                 */
                if (*prop_data == 0) {
                        prop_data++;
-                       vd = *(u32 *)prop_data;
+                       vd = be32_to_cpu(*(__be32 *)prop_data);
                        prop_data += vd + sizeof(vd);
-                       upwa->nprops--;
+                       nprops--;
                }
 
-               for (i = 0; i < upwa->nprops; i++) {
+               for (i = 0; i < nprops; i++) {
                        char *prop_name;
 
                        prop_name = prop_data;
                        prop_data += strlen(prop_name) + 1;
-                       vd = *(u32 *)prop_data;
+                       vd = be32_to_cpu(*(__be32 *)prop_data);
                        prop_data += sizeof(vd);
 
                        switch (vd) {
@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
        return 0;
 }
 
-static int add_dt_node(u32 parent_phandle, u32 drc_index)
+static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
 {
        struct device_node *dn;
        struct device_node *parent_dn;
        int rc;
 
-       parent_dn = of_find_node_by_phandle(parent_phandle);
+       parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
        if (!parent_dn)
                return -ENOENT;
 
@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
 int pseries_devicetree_update(s32 scope)
 {
        char *rtas_buf;
-       u32 *data;
+       __be32 *data;
        int update_nodes_token;
        int rc;
 
@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
                if (rc && rc != 1)
                        break;
 
-               data = (u32 *)rtas_buf + 4;
-               while (*data & NODE_ACTION_MASK) {
+               data = (__be32 *)rtas_buf + 4;
+               while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
                        int i;
-                       u32 action = *data & NODE_ACTION_MASK;
-                       int node_count = *data & NODE_COUNT_MASK;
+                       u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
+                       u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
 
                        data++;
 
                        for (i = 0; i < node_count; i++) {
-                               u32 phandle = *data++;
-                               u32 drc_index;
+                               __be32 phandle = *data++;
+                               __be32 drc_index;
 
                                switch (action) {
                                case DELETE_DT_NODE:
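The mobility hunks above stop interpreting the RTAS work area as native-endian: the raw values stay __be32/__be64 and are converted with be32_to_cpu() only at the point of use, so the code is also correct on little-endian kernels. A standalone sketch of that discipline (hypothetical buffer, not the real work-area layout):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: walk a buffer of big-endian 32-bit words on a
     * little-endian host, converting each value only where it is consumed,
     * the way the code above wraps accesses in be32_to_cpu().
     * be32toh() is the userspace counterpart of be32_to_cpu().
     */
    static void walk_be32_words(const uint8_t *buf, size_t len)
    {
            size_t i;

            for (i = 0; i + 4 <= len; i += 4) {
                    uint32_t be;

                    memcpy(&be, buf + i, sizeof(be));
                    printf("word %zu = %u\n", i / 4, be32toh(be));
            }
    }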
index c9df40b5c0ac1915eac7d6a794b25f7f9d6173b5..c9c875d9ed318cc75f7123a8d999b464201a4fd5 100644 (file)
@@ -211,7 +211,7 @@ do {                                                                \
 
 extern unsigned long mmap_rnd_mask;
 
-#define STACK_RND_MASK (mmap_rnd_mask)
+#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
 
 #define ARCH_DLINFO                                                        \
 do {                                                                       \
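The new STACK_RND_MASK caps stack randomization for 31-bit (compat) tasks at 0x7ff pages, roughly 8 MiB with 4 KiB pages, which fits a 2 GiB address space; 64-bit tasks keep mmap_rnd_mask. A rough standalone model of how the generic ELF loader applies such a mask (assumed 4 KiB pages; rnd stands in for the kernel's random source):

    #include <stdint.h>

    #define PAGE_SHIFT_SKETCH 12UL          /* assume 4 KiB pages */

    /* Rough model only: the mask bounds how many pages of random offset
     * are subtracted from the nominal stack top.
     */
    static uint64_t randomized_stack_top(uint64_t stack_top, uint64_t mask,
                                         uint32_t rnd)
    {
            return stack_top - (((uint64_t)rnd & mask) << PAGE_SHIFT_SKETCH);
    }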
index 82c19899574f83070158c09c5eed5b438bc092c1..6c79f1b44fe7f7aafd8b5379833fb6ae42cf6f72 100644 (file)
 
 unsigned long ftrace_plt;
 
+static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
+{
+#ifdef CC_USING_HOTPATCH
+       /* brcl 0,0 */
+       insn->opc = 0xc004;
+       insn->disp = 0;
+#else
+       /* stg r14,8(r15) */
+       insn->opc = 0xe3e0;
+       insn->disp = 0xf0080024;
+#endif
+}
+
+static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+       if (insn->opc == BREAKPOINT_INSTRUCTION)
+               return 1;
+#endif
+       return 0;
+}
+
+static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+       insn->opc = BREAKPOINT_INSTRUCTION;
+       insn->disp = KPROBE_ON_FTRACE_NOP;
+#endif
+}
+
+static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+       insn->opc = BREAKPOINT_INSTRUCTION;
+       insn->disp = KPROBE_ON_FTRACE_CALL;
+#endif
+}
+
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
 {
@@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                return -EFAULT;
        if (addr == MCOUNT_ADDR) {
                /* Initial code replacement */
-#ifdef CC_USING_HOTPATCH
-               /* We expect to see brcl 0,0 */
-               ftrace_generate_nop_insn(&orig);
-#else
-               /* We expect to see stg r14,8(r15) */
-               orig.opc = 0xe3e0;
-               orig.disp = 0xf0080024;
-#endif
+               ftrace_generate_orig_insn(&orig);
                ftrace_generate_nop_insn(&new);
-       } else if (old.opc == BREAKPOINT_INSTRUCTION) {
+       } else if (is_kprobe_on_ftrace(&old)) {
                /*
                 * If we find a breakpoint instruction, a kprobe has been
                 * placed at the beginning of the function. We write the
@@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                 * bytes of the original instruction so that the kprobes
                 * handler can execute a nop, if it reaches this breakpoint.
                 */
-               new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
-               orig.disp = KPROBE_ON_FTRACE_CALL;
-               new.disp = KPROBE_ON_FTRACE_NOP;
+               ftrace_generate_kprobe_call_insn(&orig);
+               ftrace_generate_kprobe_nop_insn(&new);
        } else {
                /* Replace ftrace call with a nop. */
                ftrace_generate_call_insn(&orig, rec->ip);
@@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
-       if (old.opc == BREAKPOINT_INSTRUCTION) {
+       if (is_kprobe_on_ftrace(&old)) {
                /*
                 * If we find a breakpoint instruction, a kprobe has been
                 * placed at the beginning of the function. We write the
@@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
                 * bytes of the original instruction so that the kprobes
                 * handler can execute a brasl if it reaches this breakpoint.
                 */
-               new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
-               orig.disp = KPROBE_ON_FTRACE_NOP;
-               new.disp = KPROBE_ON_FTRACE_CALL;
+               ftrace_generate_kprobe_nop_insn(&orig);
+               ftrace_generate_kprobe_call_insn(&new);
        } else {
                /* Replace nop with an ftrace call. */
                ftrace_generate_nop_insn(&orig);
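These ftrace hunks fold the CC_USING_HOTPATCH and CONFIG_KPROBES conditionals into small always-present helpers so ftrace_make_nop() and ftrace_make_call() read the same in every configuration. A minimal standalone illustration of that pattern (the macro value below is made up):

    #include <stdio.h>

    #define BREAKPOINT_INSTRUCTION_SKETCH 0x0002    /* illustrative value */

    /* Hide the configuration #ifdef inside a tiny helper so every call
     * site compiles and reads the same whether or not the option is set.
     */
    static inline int is_kprobe_breakpoint(unsigned short opc)
    {
    #ifdef CONFIG_KPROBES
            return opc == BREAKPOINT_INSTRUCTION_SKETCH;
    #else
            (void)opc;
            return 0;
    #endif
    }

    int main(void)
    {
            printf("%d\n", is_kprobe_breakpoint(0x0002));
            return 0;
    }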
index c3f8d157cb0d11898bbd72d103dd7b4aceb4b654..e6a1578fc00095929db02b51d4894fe9ad59802c 100644 (file)
@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
 
 static struct attribute *cpumsf_pmu_events_attr[] = {
        CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
-       CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
+       NULL,
        NULL,
 };
 
@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
                return -EINVAL;
        }
 
-       if (si.ad)
+       if (si.ad) {
                sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
+               cpumsf_pmu_events_attr[1] =
+                       CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
+       }
 
        sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
        if (!sfdbg)
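init_cpum_sampling_pmu() now exposes the diagnostic sampling event only when the sampling facility reports it as authorized (si.ad), by leaving a NULL slot in the attribute array and filling it at init time. A tiny standalone sketch of that shape (plain strings stand in for the kernel's attribute pointers):

    #include <stddef.h>

    /* Publish a NULL-terminated list with one spare NULL slot and fill it
     * in only when the optional event turns out to be authorized.
     */
    static const char *sf_events[] = {
            "SF_CYCLES_BASIC",
            NULL,           /* slot for SF_CYCLES_BASIC_DIAG */
            NULL,           /* terminator */
    };

    static void init_events(int diag_authorized)
    {
            if (diag_authorized)
                    sf_events[1] = "SF_CYCLES_BASIC_DIAG";
    }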
index 6b09fdffbd2f7e7a787ca5100f074be15ff71f33..ca6294645dd37eeec22c4ab3d26226da37686cd0 100644 (file)
@@ -177,6 +177,17 @@ restart_entry:
        lhi     %r1,1
        sigp    %r1,%r0,SIGP_SET_ARCHITECTURE
        sam64
+#ifdef CONFIG_SMP
+       larl    %r1,smp_cpu_mt_shift
+       icm     %r1,15,0(%r1)
+       jz      smt_done
+       llgfr   %r1,%r1
+smt_loop:
+       sigp    %r1,%r0,SIGP_SET_MULTI_THREADING
+       brc     8,smt_done                      /* accepted */
+       brc     2,smt_loop                      /* busy, try again */
+smt_done:
+#endif
        larl    %r1,.Lnew_pgm_check_psw
        lpswe   0(%r1)
 pgm_check_entry:
index 4f6725ff4c336878fd3600f840f81897fdc65099..f5b6537306f0b3259e2313f3ec03b3ed32215c49 100644 (file)
@@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
                                   unsigned long reg_val);
 #endif
 
+
+#define HV_FAST_M7_GET_PERFREG 0x43
+#define HV_FAST_M7_SET_PERFREG 0x44
+
+#ifndef        __ASSEMBLY__
+unsigned long sun4v_m7_get_perfreg(unsigned long reg_num,
+                                     unsigned long *reg_val);
+unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
+                                     unsigned long reg_val);
+#endif
+
 /* Function numbers for HV_CORE_TRAP.  */
 #define HV_CORE_SET_VER                        0x00
 #define HV_CORE_PUTCHAR                        0x01
@@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
 #define HV_GRP_SDIO                    0x0108
 #define HV_GRP_SDIO_ERR                        0x0109
 #define HV_GRP_REBOOT_DATA             0x0110
+#define HV_GRP_M7_PERF                 0x0114
 #define HV_GRP_NIAG_PERF               0x0200
 #define HV_GRP_FIRE_PERF               0x0201
 #define HV_GRP_N2_CPU                  0x0202
index 5c55145bfbf02d616ec606feb94539e26cbe1831..662500fa555f74160f6449143e6d0785af2643e9 100644 (file)
@@ -48,6 +48,7 @@ static struct api_info api_table[] = {
        { .group = HV_GRP_VT_CPU,                               },
        { .group = HV_GRP_T5_CPU,                               },
        { .group = HV_GRP_DIAG,         .flags = FLAG_PRE_API   },
+       { .group = HV_GRP_M7_PERF,                              },
 };
 
 static DEFINE_SPINLOCK(hvapi_lock);
index caedf8320416e1fe8bfc4cb76369ce1efd8ce729..afbaba52d2f16cb30daf092f5cd3986e7ca9c299 100644 (file)
@@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg)
        retl
         nop
 ENDPROC(sun4v_t5_set_perfreg)
+
+ENTRY(sun4v_m7_get_perfreg)
+       mov     %o1, %o4
+       mov     HV_FAST_M7_GET_PERFREG, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+       nop
+ENDPROC(sun4v_m7_get_perfreg)
+
+ENTRY(sun4v_m7_set_perfreg)
+       mov     HV_FAST_M7_SET_PERFREG, %o5
+       ta      HV_FAST_TRAP
+       retl
+       nop
+ENDPROC(sun4v_m7_set_perfreg)
index 7e967c8018c8cf19ba1886b39b087d9eb0638b0c..eb978c77c76a78d401e5dce22b4f5bafa8ef948d 100644 (file)
@@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = {
        .pcr_nmi_disable        = PCR_N4_PICNPT,
 };
 
+static u64 m7_pcr_read(unsigned long reg_num)
+{
+       unsigned long val;
+
+       (void) sun4v_m7_get_perfreg(reg_num, &val);
+
+       return val;
+}
+
+static void m7_pcr_write(unsigned long reg_num, u64 val)
+{
+       (void) sun4v_m7_set_perfreg(reg_num, val);
+}
+
+static const struct pcr_ops m7_pcr_ops = {
+       .read_pcr               = m7_pcr_read,
+       .write_pcr              = m7_pcr_write,
+       .read_pic               = n4_pic_read,
+       .write_pic              = n4_pic_write,
+       .nmi_picl_value         = n4_picl_value,
+       .pcr_nmi_enable         = (PCR_N4_PICNPT | PCR_N4_STRACE |
+                                  PCR_N4_UTRACE | PCR_N4_TOE |
+                                  (26 << PCR_N4_SL_SHIFT)),
+       .pcr_nmi_disable        = PCR_N4_PICNPT,
+};
 
 static unsigned long perf_hsvc_group;
 static unsigned long perf_hsvc_major;
@@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void)
                        perf_hsvc_group = HV_GRP_T5_CPU;
                        break;
 
+               case SUN4V_CHIP_SPARC_M7:
+                       perf_hsvc_group = HV_GRP_M7_PERF;
+                       break;
+
                default:
                        return -ENODEV;
                }
@@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void)
                pcr_ops = &n5_pcr_ops;
                break;
 
+       case SUN4V_CHIP_SPARC_M7:
+               pcr_ops = &m7_pcr_ops;
+               break;
+
        default:
                ret = -ENODEV;
                break;
index 46a5e4508752814cb0ec21b820673cd445b19ada..86eebfa3b15873dfd60117389e9c80f0018a13e0 100644 (file)
@@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = {
        .num_pic_regs   = 4,
 };
 
+static void sparc_m7_write_pmc(int idx, u64 val)
+{
+       u64 pcr;
+
+       pcr = pcr_ops->read_pcr(idx);
+       /* ensure ov and ntc are reset */
+       pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
+
+       pcr_ops->write_pic(idx, val & 0xffffffff);
+
+       pcr_ops->write_pcr(idx, pcr);
+}
+
+static const struct sparc_pmu sparc_m7_pmu = {
+       .event_map      = niagara4_event_map,
+       .cache_map      = &niagara4_cache_map,
+       .max_events     = ARRAY_SIZE(niagara4_perfmon_event_map),
+       .read_pmc       = sparc_vt_read_pmc,
+       .write_pmc      = sparc_m7_write_pmc,
+       .upper_shift    = 5,
+       .lower_shift    = 5,
+       .event_mask     = 0x7ff,
+       .user_bit       = PCR_N4_UTRACE,
+       .priv_bit       = PCR_N4_STRACE,
+
+       /* We explicitly don't support hypervisor tracing. */
+       .hv_bit         = 0,
+
+       .irq_bit        = PCR_N4_TOE,
+       .upper_nop      = 0,
+       .lower_nop      = 0,
+       .flags          = 0,
+       .max_hw_events  = 4,
+       .num_pcrs       = 4,
+       .num_pic_regs   = 4,
+};
 static const struct sparc_pmu *sparc_pmu __read_mostly;
 
 static u64 event_encoding(u64 event_id, int idx)
@@ -960,6 +996,8 @@ out:
        cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
 }
 
+static void sparc_pmu_start(struct perf_event *event, int flags);
+
 /* On this PMU each PIC has its own PCR control register.  */
 static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 {
@@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
                struct perf_event *cp = cpuc->event[i];
                struct hw_perf_event *hwc = &cp->hw;
                int idx = hwc->idx;
-               u64 enc;
 
                if (cpuc->current_idx[i] != PIC_NO_INDEX)
                        continue;
 
-               sparc_perf_event_set_period(cp, hwc, idx);
                cpuc->current_idx[i] = idx;
 
-               enc = perf_event_get_enc(cpuc->events[i]);
-               cpuc->pcr[idx] &= ~mask_for_index(idx);
-               if (hwc->state & PERF_HES_STOPPED)
-                       cpuc->pcr[idx] |= nop_for_index(idx);
-               else
-                       cpuc->pcr[idx] |= event_encoding(enc, idx);
+               sparc_pmu_start(cp, PERF_EF_RELOAD);
        }
 out:
        for (i = 0; i < cpuc->n_events; i++) {
@@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
        int i;
 
        local_irq_save(flags);
-       perf_pmu_disable(event->pmu);
 
        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event[i]) {
@@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
                }
        }
 
-       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
@@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
        unsigned long flags;
 
        local_irq_save(flags);
-       perf_pmu_disable(event->pmu);
 
        n0 = cpuc->n_events;
        if (n0 >= sparc_pmu->max_hw_events)
@@ -1394,7 +1422,6 @@ nocheck:
 
        ret = 0;
 out:
-       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
        return ret;
 }
@@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void)
                sparc_pmu = &niagara4_pmu;
                return true;
        }
+       if (!strcmp(sparc_pmu_type, "sparc-m7")) {
+               sparc_pmu = &sparc_m7_pmu;
+               return true;
+       }
        return false;
 }
 
index 0be7bf978cb1da03b8d657958ec6d0ec3c4e7d8b..46a59643bb1cee71edbd3d91d3f8f2eb09b3a26a 100644 (file)
@@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
                        printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
                               gp->tpc, gp->o7, gp->i7, gp->rpc);
                }
+
+               touch_nmi_watchdog();
        }
 
        memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
@@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void)
                       (cpu == this_cpu ? '*' : ' '), cpu,
                       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
                       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
+
+               touch_nmi_watchdog();
        }
 
        memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
index b7f6334e159f9d22fc34ccc2f003511009ca9b61..857ad4f8905f942f44ac40770c9277323e4e3c09 100644 (file)
@@ -8,9 +8,11 @@
 
        .text
 ENTRY(memmove) /* o0=dst o1=src o2=len */
-       mov             %o0, %g1
+       brz,pn          %o2, 99f
+        mov            %o0, %g1
+
        cmp             %o0, %o1
-       bleu,pt         %xcc, memcpy
+       bleu,pt         %xcc, 2f
         add            %o1, %o2, %g7
        cmp             %g7, %o0
        bleu,pt         %xcc, memcpy
@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
        stb             %g7, [%o0]
        bne,pt          %icc, 1b
         sub            %o0, 1, %o0
-
+99:
        retl
         mov            %g1, %o0
+
+       /* We can't just call memcpy for these memmove cases.  On some
+        * chips the memcpy uses cache initializing stores and when dst
+        * and src are close enough, those can clobber the source data
+        * before we've loaded it in.
+        */
+2:     or              %o0, %o1, %g7
+       or              %o2, %g7, %g7
+       andcc           %g7, 0x7, %g0
+       bne,pn          %xcc, 4f
+        nop
+
+3:     ldx             [%o1], %g7
+       add             %o1, 8, %o1
+       subcc           %o2, 8, %o2
+       add             %o0, 8, %o0
+       bne,pt          %icc, 3b
+        stx            %g7, [%o0 - 0x8]
+       ba,a,pt         %xcc, 99b
+
+4:     ldub            [%o1], %g7
+       add             %o1, 1, %o1
+       subcc           %o2, 1, %o2
+       add             %o0, 1, %o0
+       bne,pt          %icc, 4b
+        stb            %g7, [%o0 - 0x1]
+       ba,a,pt         %xcc, 99b
 ENDPROC(memmove)
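The comment above states the key constraint: memcpy may use cache-initializing (block-init) stores, which can clobber not-yet-read source bytes when the regions overlap, so memmove now carries its own forward-copy loops. A portable C sketch of the direction logic the assembly implements (the assembly additionally takes an 8-byte fast path when pointers and length are 8-byte aligned):

    #include <stddef.h>

    /* Copy backwards when the destination starts inside the source range,
     * so the overlapping tail is read before it is overwritten; otherwise
     * copy forwards.
     */
    void *memmove_sketch(void *dst, const void *src, size_t len)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            if (d > s && d < s + len) {
                    while (len--)
                            d[len] = s[len];
            } else {
                    size_t i;

                    for (i = 0; i < len; i++)
                            d[i] = s[i];
            }
            return dst;
    }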
index 498b6d967138b1fff29659e81813c77e70ff1f58..258990688a5e999557de7f3b5398d7eccdc45ebb 100644 (file)
@@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = {
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
        /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
-       INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
+       INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
        /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
-       INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
+       INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
        /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
-       INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
+       INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
        EVENT_CONSTRAINT_END
 };
 
@@ -1649,11 +1649,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
        if (c)
                return c;
 
-       c = intel_pebs_constraints(event);
+       c = intel_shared_regs_constraints(cpuc, event);
        if (c)
                return c;
 
-       c = intel_shared_regs_constraints(cpuc, event);
+       c = intel_pebs_constraints(event);
        if (c)
                return c;
 
index 1d74d161687c9f2a71f334b5530067356310af18..f0095a76c18211813d711bfa52b82c916190f42d 100644 (file)
@@ -364,12 +364,21 @@ system_call_fastpath:
  * Has incomplete stack frame and undefined top of stack.
  */
 ret_from_sys_call:
-       testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-       jnz int_ret_from_sys_call_fixup /* Go the the slow path */
-
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
+
+       /*
+        * We must check ti flags with interrupts (or at least preemption)
+        * off because we must *never* return to userspace without
+        * processing exit work that is enqueued if we're preempted here.
+        * In particular, returning to userspace with any of the one-shot
+        * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+        * very bad.
+        */
+       testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+       jnz int_ret_from_sys_call_fixup /* Go to the slow path */
+
        CFI_REMEMBER_STATE
        /*
         * sysretq will re-enable interrupts:
@@ -386,7 +395,7 @@ ret_from_sys_call:
 
 int_ret_from_sys_call_fixup:
        FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
-       jmp int_ret_from_sys_call
+       jmp int_ret_from_sys_call_irqs_off
 
        /* Do syscall tracing */
 tracesys:
@@ -432,6 +441,7 @@ tracesys_phase2:
 GLOBAL(int_ret_from_sys_call)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
+int_ret_from_sys_call_irqs_off:
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */
 GLOBAL(int_with_check)
@@ -789,7 +799,21 @@ retint_swapgs:             /* return to user-space */
        cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp)      /* R11 == RFLAGS */
        jne opportunistic_sysret_failed
 
-       testq $X86_EFLAGS_RF,%r11               /* sysret can't restore RF */
+       /*
+        * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
+        * restoring TF results in a trap from userspace immediately after
+        * SYSRET.  This would cause an infinite loop whenever #DB happens
+        * with register state that satisfies the opportunistic SYSRET
+        * conditions.  For example, single-stepping this user code:
+        *
+        *           movq $stuck_here,%rcx
+        *           pushfq
+        *           popq %r11
+        *   stuck_here:
+        *
+        * would never get past 'stuck_here'.
+        */
+       testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
        jnz opportunistic_sysret_failed
 
        /* nothing to check for RSP */
index 7ec1d5f8d28339bce0b74191a1d458c6c8e5d5df..25ecd56cefa8f22496153cf29b763c266bd8d91e 100644 (file)
@@ -72,7 +72,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
        { "bx", 8, offsetof(struct pt_regs, bx) },
        { "cx", 8, offsetof(struct pt_regs, cx) },
        { "dx", 8, offsetof(struct pt_regs, dx) },
-       { "si", 8, offsetof(struct pt_regs, dx) },
+       { "si", 8, offsetof(struct pt_regs, si) },
        { "di", 8, offsetof(struct pt_regs, di) },
        { "bp", 8, offsetof(struct pt_regs, bp) },
        { "sp", 8, offsetof(struct pt_regs, sp) },
index bae6c609888e7fdff25784d5bd96fd8dcd5ea88a..86db4bcd7ce52bcb74a5bf42efcd8e7152488cf1 100644 (file)
@@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                },
        },
 
+       /* ASRock */
+       {       /* Handle problems with rebooting on ASRock Q1900DC-ITX */
+               .callback = set_pci_reboot,
+               .ident = "ASRock Q1900DC-ITX",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
+                       DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
+               },
+       },
+
        /* ASUS */
        {       /* Handle problems with rebooting on ASUS P4S800 */
                .callback = set_bios_reboot,
index b1947e0f3e100d7552add9688125d9c86d29a8ab..46d4449772bc714daa658ea6424fb45659095c70 100644 (file)
@@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
                        struct kvm_ioapic *ioapic, int vector, int trigger_mode)
 {
        int i;
+       struct kvm_lapic *apic = vcpu->arch.apic;
 
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
                union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
                kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
                spin_lock(&ioapic->lock);
 
-               if (trigger_mode != IOAPIC_LEVEL_TRIG)
+               if (trigger_mode != IOAPIC_LEVEL_TRIG ||
+                   kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
                        continue;
 
                ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
index bd4e34de24c7a0860de0adf88d3b85fd94f8ddae..4ee827d7bf36f730c25d358f709aa99cda93260a 100644 (file)
@@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 
 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
 {
-       if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
-           kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+       if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
                int trigger_mode;
                if (apic_test_vector(vector, apic->regs + APIC_TMR))
                        trigger_mode = IOAPIC_LEVEL_TRIG;
index 10a481b7674de285a45de0a1675ee60a9318e19d..ae4f6d35d19c268315745741150dd6d1a7df5222 100644 (file)
@@ -2479,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
        if (enable_ept) {
                /* nested EPT: emulate EPT also to L1 */
                vmx->nested.nested_vmx_secondary_ctls_high |=
-                       SECONDARY_EXEC_ENABLE_EPT |
-                       SECONDARY_EXEC_UNRESTRICTED_GUEST;
+                       SECONDARY_EXEC_ENABLE_EPT;
                vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
                         VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
                         VMX_EPT_INVEPT_BIT;
@@ -2494,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
        } else
                vmx->nested.nested_vmx_ept_caps = 0;
 
+       if (enable_unrestricted_guest)
+               vmx->nested.nested_vmx_secondary_ctls_high |=
+                       SECONDARY_EXEC_UNRESTRICTED_GUEST;
+
        /* miscellaneous data */
        rdmsr(MSR_IA32_VMX_MISC,
                vmx->nested.nested_vmx_misc_low,
index 9f93af56a5fc7bd4cf263406faf96f8a5fa56466..b47124d4cd67e29199fae1c48f9a94ecd93b00f7 100644 (file)
@@ -91,6 +91,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
 unsigned long xen_max_p2m_pfn __read_mostly;
 EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
 
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#else
+#define P2M_LIMIT 0
+#endif
+
 static DEFINE_SPINLOCK(p2m_update_lock);
 
 static unsigned long *p2m_mid_missing_mfn;
@@ -385,9 +391,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
 void __init xen_vmalloc_p2m_tree(void)
 {
        static struct vm_struct vm;
+       unsigned long p2m_limit;
 
+       p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
        vm.flags = VM_ALLOC;
-       vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
+       vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
                        PMD_SIZE * PMDS_PER_MID_PAGE);
        vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
        pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);
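A quick sizing check of the new reservation, assuming CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT=512 (GiB) and 4 KiB pages:

    p2m_limit = 512 GiB / 4 KiB                   = 134,217,728 pfns
    vm.size  >= p2m_limit * sizeof(unsigned long) = 1 GiB of virtual space

so the p2m list's virtual area now also covers memory that may be hotplugged later, not just the boot-time xen_max_p2m_pfn.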
index fc1ff3b1ea1f4a9ea2dcd18d6e49eea7281eb9b3..fd3fee81c23ce2f1cdc73d2bf4e76188c90358cb 100644 (file)
@@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
        if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
                struct bio_vec *bprev;
 
-               bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+               bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
                if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
                        return false;
        }
index d53a764b05eacde776a4454054795d1c0277d676..be3290cc0644efc9e3feec6fd891c7afe1a15775 100644 (file)
@@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
-                * some to complete.
+                * some to complete. Note that hctx can be NULL here for
+                * reserved tag allocation.
                 */
-               blk_mq_run_hw_queue(hctx, false);
+               if (hctx)
+                       blk_mq_run_hw_queue(hctx, false);
 
                /*
                 * Retry tag allocation after running the hardware queue,
index 4f4bea21052e41068112ead8cbb4e0a42cb7a9d6..b7b8933ec24188b2807229af1aa844503b46dc89 100644 (file)
@@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
         */
        if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
                            PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-               goto err_map;
+               goto err_mq_usage;
 
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
        blk_queue_rq_timeout(q, 30000);
@@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
        if (blk_mq_init_hw_queues(q, set))
-               goto err_hw;
+               goto err_mq_usage;
 
        mutex_lock(&all_q_mutex);
        list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
        return q;
 
-err_hw:
+err_mq_usage:
        blk_cleanup_queue(q);
 err_hctxs:
        kfree(map);
index 6ed2cbe5e8c9ae340233b0491255dfed40947590..12600bfffca93f4547e2325eeda9669ff443a7a7 100644 (file)
@@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                     b->physical_block_size);
 
        t->io_min = max(t->io_min, b->io_min);
-       t->io_opt = lcm(t->io_opt, b->io_opt);
+       t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 
        t->cluster &= b->cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;
@@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                    b->raid_partial_stripes_expensive);
 
        /* Find lowest common alignment_offset */
-       t->alignment_offset = lcm(t->alignment_offset, alignment)
+       t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);
 
        /* Verify that new alignment_offset is on a logical block boundary */
@@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                                      b->max_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
-               t->discard_alignment = lcm(t->discard_alignment, alignment) %
+               t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }
 
index 4c35f0822d06e54dd5399a285d22cbcfe174b115..23dac3babfe3afc710db73a2ad4f8fe05174b7d7 100644 (file)
@@ -4204,9 +4204,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "PIONEER DVD-RW  DVR-216D",   NULL,   ATA_HORKAGE_NOSETXFER },
 
        /* devices that don't properly handle queued TRIM commands */
-       { "Micron_M[56]*",              NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+       { "Micron_M500*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Crucial_CT*M500*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Micron_M5[15]0*",            "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Crucial_CT*M550*",           "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Crucial_CT*MX100*",          "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Samsung SSD 850 PRO*",       NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
-       { "Crucial_CT*SSD*",            NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
 
        /*
         * As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4226,6 +4235,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         */
        { "INTEL*SSDSC2MH*",            NULL,   0, },
 
+       { "Micron*",                    NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Crucial*",                   NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "INTEL*SSD*",                 NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "SSD*INTEL*",                 NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Samsung*SSD*",               NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4737,7 +4748,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
                return NULL;
 
        /* libsas case */
-       if (!ap->scsi_host) {
+       if (ap->flags & ATA_FLAG_SAS_HOST) {
                tag = ata_sas_allocate_tag(ap);
                if (tag < 0)
                        return NULL;
@@ -4776,7 +4787,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
        tag = qc->tag;
        if (likely(ata_tag_valid(tag))) {
                qc->tag = ATA_TAG_POISON;
-               if (!ap->scsi_host)
+               if (ap->flags & ATA_FLAG_SAS_HOST)
                        ata_sas_free_tag(tag, ap);
        }
 }
index beb8b27d4621a6d9f839065c1296fa8ab67f3032..a13587b5c2be32b912f9fe0fd7544761a280bd57 100644 (file)
@@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops;
 extern struct regcache_ops regcache_lzo_ops;
 extern struct regcache_ops regcache_flat_ops;
 
+static inline const char *regmap_name(const struct regmap *map)
+{
+       if (map->dev)
+               return dev_name(map->dev);
+
+       return map->name;
+}
+
 #endif
index da84f544c5443da0cf3928dd390ec81dcd77f669..87db9893b463ba505acc3c28e253b5a08cf20c90 100644 (file)
@@ -218,7 +218,7 @@ int regcache_read(struct regmap *map,
                ret = map->cache_ops->read(map, reg, value);
 
                if (ret == 0)
-                       trace_regmap_reg_read_cache(map->dev, reg, *value);
+                       trace_regmap_reg_read_cache(map, reg, *value);
 
                return ret;
        }
@@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map)
        dev_dbg(map->dev, "Syncing %s cache\n",
                map->cache_ops->name);
        name = map->cache_ops->name;
-       trace_regcache_sync(map->dev, name, "start");
+       trace_regcache_sync(map, name, "start");
 
        if (!map->cache_dirty)
                goto out;
@@ -346,7 +346,7 @@ out:
 
        regmap_async_complete(map);
 
-       trace_regcache_sync(map->dev, name, "stop");
+       trace_regcache_sync(map, name, "stop");
 
        return ret;
 }
@@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
        name = map->cache_ops->name;
        dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
 
-       trace_regcache_sync(map->dev, name, "start region");
+       trace_regcache_sync(map, name, "start region");
 
        if (!map->cache_dirty)
                goto out;
@@ -401,7 +401,7 @@ out:
 
        regmap_async_complete(map);
 
-       trace_regcache_sync(map->dev, name, "stop region");
+       trace_regcache_sync(map, name, "stop region");
 
        return ret;
 }
@@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
 
        map->lock(map->lock_arg);
 
-       trace_regcache_drop_region(map->dev, min, max);
+       trace_regcache_drop_region(map, min, max);
 
        ret = map->cache_ops->drop(map, min, max);
 
@@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
        map->lock(map->lock_arg);
        WARN_ON(map->cache_bypass && enable);
        map->cache_only = enable;
-       trace_regmap_cache_only(map->dev, enable);
+       trace_regmap_cache_only(map, enable);
        map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
        map->lock(map->lock_arg);
        WARN_ON(map->cache_only && enable);
        map->cache_bypass = enable;
-       trace_regmap_cache_bypass(map->dev, enable);
+       trace_regmap_cache_bypass(map, enable);
        map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
index f99b098ddabfbd23dae3ab3ee7733b9ff24e28ef..dbfe6a69c3daa67969df7c670f9e5144a053afb6 100644 (file)
@@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
        if (map->async && map->bus->async_write) {
                struct regmap_async *async;
 
-               trace_regmap_async_write_start(map->dev, reg, val_len);
+               trace_regmap_async_write_start(map, reg, val_len);
 
                spin_lock_irqsave(&map->async_lock, flags);
                async = list_first_entry_or_null(&map->async_free,
@@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
                return ret;
        }
 
-       trace_regmap_hw_write_start(map->dev, reg,
-                                   val_len / map->format.val_bytes);
+       trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
 
        /* If we're doing a single register write we can probably just
         * send the work_buf directly, otherwise try to do a gather
@@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
                kfree(buf);
        }
 
-       trace_regmap_hw_write_done(map->dev, reg,
-                                  val_len / map->format.val_bytes);
+       trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
 
        return ret;
 }
@@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 
        map->format.format_write(map, reg, val);
 
-       trace_regmap_hw_write_start(map->dev, reg, 1);
+       trace_regmap_hw_write_start(map, reg, 1);
 
        ret = map->bus->write(map->bus_context, map->work_buf,
                              map->format.buf_size);
 
-       trace_regmap_hw_write_done(map->dev, reg, 1);
+       trace_regmap_hw_write_done(map, reg, 1);
 
        return ret;
 }
@@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
                dev_info(map->dev, "%x <= %x\n", reg, val);
 #endif
 
-       trace_regmap_reg_write(map->dev, reg, val);
+       trace_regmap_reg_write(map, reg, val);
 
        return map->reg_write(context, reg, val);
 }
@@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
        for (i = 0; i < num_regs; i++) {
                int reg = regs[i].reg;
                int val = regs[i].def;
-               trace_regmap_hw_write_start(map->dev, reg, 1);
+               trace_regmap_hw_write_start(map, reg, 1);
                map->format.format_reg(u8, reg, map->reg_shift);
                u8 += reg_bytes + pad_bytes;
                map->format.format_val(u8, val, 0);
@@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 
        for (i = 0; i < num_regs; i++) {
                int reg = regs[i].reg;
-               trace_regmap_hw_write_done(map->dev, reg, 1);
+               trace_regmap_hw_write_done(map, reg, 1);
        }
        return ret;
 }
@@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
         */
        u8[0] |= map->read_flag_mask;
 
-       trace_regmap_hw_read_start(map->dev, reg,
-                                  val_len / map->format.val_bytes);
+       trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
 
        ret = map->bus->read(map->bus_context, map->work_buf,
                             map->format.reg_bytes + map->format.pad_bytes,
                             val, val_len);
 
-       trace_regmap_hw_read_done(map->dev, reg,
-                                 val_len / map->format.val_bytes);
+       trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
 
        return ret;
 }
@@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
                        dev_info(map->dev, "%x => %x\n", reg, *val);
 #endif
 
-               trace_regmap_reg_read(map->dev, reg, *val);
+               trace_regmap_reg_read(map, reg, *val);
 
                if (!map->cache_bypass)
                        regcache_write(map, reg, *val);
@@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
        struct regmap *map = async->map;
        bool wake;
 
-       trace_regmap_async_io_complete(map->dev);
+       trace_regmap_async_io_complete(map);
 
        spin_lock(&map->async_lock);
        list_move(&async->list, &map->async_free);
@@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map)
        if (!map->bus || !map->bus->async_write)
                return 0;
 
-       trace_regmap_async_complete_start(map->dev);
+       trace_regmap_async_complete_start(map);
 
        wait_event(map->async_waitq, regmap_async_is_done(map));
 
@@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map)
        map->async_ret = 0;
        spin_unlock_irqrestore(&map->async_lock, flags);
 
-       trace_regmap_async_complete_done(map->dev);
+       trace_regmap_async_complete_done(map);
 
        return ret;
 }
index 4bc2a5cb9935fbc6f5256102bdf9c2c24b9e6d8c..a98c41f72c63f4ceac0cef519f12a7b1b5be7702 100644 (file)
@@ -803,10 +803,6 @@ static int __init nbd_init(void)
                return -EINVAL;
        }
 
-       nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
-       if (!nbd_dev)
-               return -ENOMEM;
-
        part_shift = 0;
        if (max_part > 0) {
                part_shift = fls(max_part);
@@ -828,6 +824,10 @@ static int __init nbd_init(void)
        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -EINVAL;
 
+       nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
+       if (!nbd_dev)
+               return -ENOMEM;
+
        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = alloc_disk(1 << part_shift);
                if (!disk)
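Moving the kcalloc() below the parameter checks means an invalid max_part or nbds_max no longer leaks the freshly allocated nbd_dev array on the early -EINVAL returns. A standalone sketch of the validate-first, allocate-last shape (names and limits are made up):

    #include <errno.h>
    #include <stdlib.h>

    /* Do all parameter validation before the first allocation, so every
     * early return is leak-free without needing an error-unwind path.
     */
    static int init_sketch(unsigned int nunits, unsigned int max_part)
    {
            void **table;

            if (max_part > 256)             /* hypothetical limit */
                    return -EINVAL;
            if (nunits == 0)
                    return -EINVAL;

            table = calloc(nunits, sizeof(*table));
            if (!table)
                    return -ENOMEM;

            /* ... register devices using table ... */
            free(table);
            return 0;
    }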
index ceb32dd52a6ca5a777e97541646866985251a9f0..e23be20a341752c1b175cc05612c7e087ee24fa4 100644 (file)
@@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
        get_device(dev->device);
 
+       INIT_LIST_HEAD(&dev->node);
        INIT_WORK(&dev->probe_work, nvme_async_probe);
        schedule_work(&dev->probe_work);
        return 0;
index 7e7146f735c6821afe5f79b6b7b75cfb1668b027..b4ac7cfae4418d3ed904db7e697078ffa00cab86 100644 (file)
@@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI
 config SH_TIMER_CMT
        bool "Renesas CMT timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        default SYS_SUPPORTS_SH_CMT
        help
          This enables build of a clocksource and clockevent driver for
@@ -201,6 +202,7 @@ config SH_TIMER_CMT
 config SH_TIMER_MTU2
        bool "Renesas MTU2 timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        default SYS_SUPPORTS_SH_MTU2
        help
          This enables build of a clockevent driver for the Multi-Function
@@ -210,6 +212,7 @@ config SH_TIMER_MTU2
 config SH_TIMER_TMU
        bool "Renesas TMU timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        default SYS_SUPPORTS_SH_TMU
        help
          This enables build of a clocksource and clockevent driver for
index 5dcbf90b8015ce40787d2acf27a53a142d4aed8d..58597fbcc046f27f88238aa949730d6109a704b7 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/irq.h>
 #include <linux/irqreturn.h>
 #include <linux/reset.h>
-#include <linux/sched_clock.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = {
        .dev_id = &sun5i_clockevent,
 };
 
-static u64 sun5i_timer_sched_read(void)
-{
-       return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
-}
-
 static void __init sun5i_timer_init(struct device_node *node)
 {
        struct reset_control *rstc;
@@ -172,7 +166,6 @@ static void __init sun5i_timer_init(struct device_node *node)
        writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
               timer_base + TIMER_CTL_REG(1));
 
-       sched_clock_register(sun5i_timer_sched_read, 32, rate);
        clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
                              rate, 340, 32, clocksource_mmio_readl_down);
 
index 0723096fb50ac125dbb471126ba396085307ff2e..c92d6a70ccf303c69cfdb127210a09a1262122bc 100644 (file)
@@ -475,6 +475,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
         * c->desc is NULL and exit.)
         */
        if (c->desc) {
+               bcm2835_dma_desc_free(&c->desc->vd);
                c->desc = NULL;
                bcm2835_dma_abort(c->chan_base);
 
index 4527a3ebeac446f58a4c3b3b8722d6caf16b1b63..84884418fd30fc73a700bde9c0bebd67d72ea0a5 100644 (file)
@@ -511,6 +511,9 @@ static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
        kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
 }
 
+#define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
 static int jz4740_dma_probe(struct platform_device *pdev)
 {
        struct jz4740_dmaengine_chan *chan;
@@ -548,6 +551,10 @@ static int jz4740_dma_probe(struct platform_device *pdev)
        dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
        dd->device_config = jz4740_dma_slave_config;
        dd->device_terminate_all = jz4740_dma_terminate_all;
+       dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS;
+       dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS;
+       dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        dd->dev = &pdev->dev;
        INIT_LIST_HEAD(&dd->channels);
 
index 276157f22612dc18140b84294ab1ef49ecf46ebc..53dbd3b3384cfd8b00940f7a7cd752b28bcac71a 100644 (file)
@@ -260,6 +260,13 @@ static int edma_terminate_all(struct dma_chan *chan)
         */
        if (echan->edesc) {
                int cyclic = echan->edesc->cyclic;
+
+               /*
+                * free the running request descriptor
+                * since it is not in any of the vdesc lists
+                */
+               edma_desc_free(&echan->edesc->vdesc);
+
                echan->edesc = NULL;
                edma_stop(echan->ch_num);
                /* Move the cyclic channel back to default queue */
index 15cab7d79525914d862e5cd38344bc2e3046c95a..b4634109e0100905dd39d285f03d669c405cbef0 100644 (file)
@@ -193,8 +193,10 @@ static int moxart_terminate_all(struct dma_chan *chan)
 
        spin_lock_irqsave(&ch->vc.lock, flags);
 
-       if (ch->desc)
+       if (ch->desc) {
+               moxart_dma_desc_free(&ch->desc->vd);
                ch->desc = NULL;
+       }
 
        ctrl = readl(ch->base + REG_OFF_CTRL);
        ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
index 7dd6dd1216819543aae06f2d3dd2707d16917c24..167dbaf6574275a0fffd5590b7a169e811320b39 100644 (file)
@@ -981,6 +981,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
         * c->desc is NULL and exit.)
         */
        if (c->desc) {
+               omap_dma_desc_free(&c->desc->vd);
                c->desc = NULL;
                /* Avoid stopping the dma twice */
                if (!c->paused)
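The bcm2835, edma, moxart and omap hunks share one fix: the descriptor currently programmed into the hardware was taken off the virt-dma lists when it was issued, so terminate_all() has to free it explicitly or it leaks. A standalone sketch of that shape (struct and helper names are made up):

    #include <stdlib.h>

    struct desc_sketch { int cookie; };

    struct chan_sketch {
            struct desc_sketch *active;     /* descriptor on the hardware */
    };

    /* Release the in-flight descriptor before dropping the pointer; it is
     * not on any list that later cleanup would walk.
     */
    static void terminate_all_sketch(struct chan_sketch *c)
    {
            if (c->active) {
                    free(c->active);
                    c->active = NULL;
            }
            /* ...then stop the hardware and drain any queued descriptors. */
    }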
index 69fac068669fde566f41013cefbdf48db023466c..2eebd28b4c40af2789c32e0008f2b60006fc03ac 100644 (file)
@@ -86,10 +86,13 @@ static void dmi_table(u8 *buf, u32 len, int num,
        int i = 0;
 
        /*
-        *      Stop when we see all the items the table claimed to have
-        *      OR we run off the end of the table (also happens)
+        * Stop when we have seen all the items the table claimed to have
+        * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run
+        * off the end of the table (should never happen but sometimes does
+        * on bogus implementations.)
         */
-       while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
+       while ((!num || i < num) &&
+              (data - buf + sizeof(struct dmi_header)) <= len) {
                const struct dmi_header *dm = (const struct dmi_header *)data;
 
                /*
@@ -529,21 +532,10 @@ static int __init dmi_smbios3_present(const u8 *buf)
        if (memcmp(buf, "_SM3_", 5) == 0 &&
            buf[6] < 32 && dmi_checksum(buf, buf[6])) {
                dmi_ver = get_unaligned_be16(buf + 7);
+               dmi_num = 0;                    /* No longer specified */
                dmi_len = get_unaligned_le32(buf + 12);
                dmi_base = get_unaligned_le64(buf + 16);
 
-               /*
-                * The 64-bit SMBIOS 3.0 entry point no longer has a field
-                * containing the number of structures present in the table.
-                * Instead, it defines the table size as a maximum size, and
-                * relies on the end-of-table structure type (#127) to be used
-                * to signal the end of the table.
-                * So let's define dmi_num as an upper bound as well: each
-                * structure has a 4 byte header, so dmi_len / 4 is an upper
-                * bound for the number of structures in the table.
-                */
-               dmi_num = dmi_len / 4;
-
                if (dmi_walk_early(dmi_decode) == 0) {
                        pr_info("SMBIOS %d.%d present.\n",
                                dmi_ver >> 8, dmi_ver & 0xFF);
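With the SMBIOS 3.0 entry point dmi_num now stays 0, so dmi_table() scans until the declared length runs out or, as the reworded comment above says, an end-of-table record is reached (the type 127 test itself sits outside this excerpt). A standalone sketch of that termination rule (simplified; it does not parse the trailing string areas real records carry):

    #include <stddef.h>
    #include <stdint.h>

    struct dmi_header_sketch {
            uint8_t type;
            uint8_t length;
            uint16_t handle;
    };

    /* Stop on an end-of-table record (type 127), a malformed length, or
     * when the declared table length is exhausted.
     */
    static size_t count_records(const uint8_t *buf, uint32_t len)
    {
            size_t i = 0, n = 0;

            while (i + sizeof(struct dmi_header_sketch) <= len) {
                    const struct dmi_header_sketch *dm =
                            (const struct dmi_header_sketch *)(buf + i);

                    if (dm->type == 127 || dm->length < sizeof(*dm))
                            break;
                    n++;
                    i += dm->length;
            }
            return n;
    }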
index a6952ba343a89747b919b45aa4d9e07951762fcd..a65b75161aa49d16749258407db18c7cfc30f79f 100644 (file)
@@ -334,7 +334,7 @@ static struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
        .xlate  = irq_domain_xlate_twocell,
 };
 
-static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
+static struct of_device_id mpc8xxx_gpio_ids[] = {
        { .compatible = "fsl,mpc8349-gpio", },
        { .compatible = "fsl,mpc8572-gpio", },
        { .compatible = "fsl,mpc8610-gpio", },
index 257e2989215c035b87fbef2f7b086c58b722e266..045a952576c708e253de29438abaec95640989f2 100644 (file)
@@ -219,7 +219,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
                ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2,
                                                 &priv->dir_reg_offset);
                if (ret)
-                       dev_err(dev, "can't read the dir register offset!\n");
+                       dev_dbg(dev, "can't read the dir register offset!\n");
 
                priv->dir_reg_offset <<= 3;
        }
index c0929d938ced866e343e0230e00fc5c9cda77c0b..df990f29757a7e045fd8a42760944b2d8bd2ba84 100644 (file)
@@ -201,6 +201,10 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
        if (!handler)
                return AE_BAD_PARAMETER;
 
+       pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+       if (pin < 0)
+               return AE_BAD_PARAMETER;
+
        desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event");
        if (IS_ERR(desc)) {
                dev_err(chip->dev, "Failed to request GPIO\n");
@@ -551,6 +555,12 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
                struct gpio_desc *desc;
                bool found;
 
+               pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+               if (pin < 0) {
+                       status = AE_BAD_PARAMETER;
+                       goto out;
+               }
+
                mutex_lock(&achip->conn_lock);
 
                found = false;
index f6d04c7b5115a965bce68ad265080a46fcad3a6b..679b10e34fb545f23c827f9699bfef1c9f4268bd 100644 (file)
@@ -525,17 +525,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
 }
 EXPORT_SYMBOL(drm_framebuffer_reference);
 
-static void drm_framebuffer_free_bug(struct kref *kref)
-{
-       BUG();
-}
-
-static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
-       DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
-       kref_put(&fb->refcount, drm_framebuffer_free_bug);
-}
-
 /**
  * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
  * @fb: fb to unregister
@@ -1320,7 +1309,7 @@ void drm_plane_force_disable(struct drm_plane *plane)
                return;
        }
        /* disconnect the plane from the fb and crtc: */
-       __drm_framebuffer_unreference(plane->old_fb);
+       drm_framebuffer_unreference(plane->old_fb);
        plane->old_fb = NULL;
        plane->fb = NULL;
        plane->crtc = NULL;
index 732cb6f8e653f58dee7102f0bb11b797cde2f5ad..4c0aa97aaf0399a111fe0d63be6d996bafbfd7f3 100644 (file)
@@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
 
        drm_mode_connector_update_edid_property(connector, edid);
        ret = drm_add_edid_modes(connector, edid);
+       drm_edid_to_eld(connector, edid);
        kfree(edid);
 
        return ret;
index 6591d48c1b9d0f3bcc4a74a8da730833cbc4e839..3fee587bc284ebffc7b2050c69034cba9c1949da 100644 (file)
@@ -174,6 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
                        struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
 
                        count = drm_add_edid_modes(connector, edid);
+                       drm_edid_to_eld(connector, edid);
                } else
                        count = (*connector_funcs->get_modes)(connector);
        }
index c300e22da8ac5f2f4294c27543e62291ee0a8739..33a10ce967eacdfbcbc679a8645baae4515b4f1b 100644 (file)
@@ -147,6 +147,7 @@ struct fimd_win_data {
        unsigned int            ovl_height;
        unsigned int            fb_width;
        unsigned int            fb_height;
+       unsigned int            fb_pitch;
        unsigned int            bpp;
        unsigned int            pixel_format;
        dma_addr_t              dma_addr;
@@ -532,13 +533,14 @@ static void fimd_win_mode_set(struct exynos_drm_crtc *crtc,
        win_data->offset_y = plane->crtc_y;
        win_data->ovl_width = plane->crtc_width;
        win_data->ovl_height = plane->crtc_height;
+       win_data->fb_pitch = plane->pitch;
        win_data->fb_width = plane->fb_width;
        win_data->fb_height = plane->fb_height;
        win_data->dma_addr = plane->dma_addr[0] + offset;
        win_data->bpp = plane->bpp;
        win_data->pixel_format = plane->pixel_format;
-       win_data->buf_offsize = (plane->fb_width - plane->crtc_width) *
-                               (plane->bpp >> 3);
+       win_data->buf_offsize =
+               plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
        win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
 
        DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
@@ -704,7 +706,7 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
        writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
 
        /* buffer end address */
-       size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
+       size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3);
        val = (unsigned long)(win_data->dma_addr + size);
        writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
 
index 3518bc4654c5c9381acdb8d7253395e0466c8adc..2e3bc57ea50e594dcdf739042c39169428ecbda1 100644 (file)
@@ -55,6 +55,7 @@ struct hdmi_win_data {
        unsigned int            fb_x;
        unsigned int            fb_y;
        unsigned int            fb_width;
+       unsigned int            fb_pitch;
        unsigned int            fb_height;
        unsigned int            src_width;
        unsigned int            src_height;
@@ -438,7 +439,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
        } else {
                luma_addr[0] = win_data->dma_addr;
                chroma_addr[0] = win_data->dma_addr
-                       + (win_data->fb_width * win_data->fb_height);
+                       + (win_data->fb_pitch * win_data->fb_height);
        }
 
        if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
@@ -447,8 +448,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
                        luma_addr[1] = luma_addr[0] + 0x40;
                        chroma_addr[1] = chroma_addr[0] + 0x40;
                } else {
-                       luma_addr[1] = luma_addr[0] + win_data->fb_width;
-                       chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
+                       luma_addr[1] = luma_addr[0] + win_data->fb_pitch;
+                       chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch;
                }
        } else {
                ctx->interlace = false;
@@ -469,10 +470,10 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
        vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
 
        /* setting size of input image */
-       vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
+       vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) |
                VP_IMG_VSIZE(win_data->fb_height));
        /* chroma height has to be reduced by 2 to avoid chroma distortions */
-       vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
+       vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) |
                VP_IMG_VSIZE(win_data->fb_height / 2));
 
        vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
@@ -559,7 +560,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
        /* converting dma address base and source offset */
        dma_addr = win_data->dma_addr
                + (win_data->fb_x * win_data->bpp >> 3)
-               + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
+               + (win_data->fb_y * win_data->fb_pitch);
        src_x_offset = 0;
        src_y_offset = 0;
 
@@ -576,7 +577,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
                MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
 
        /* setup geometry */
-       mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
+       mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
+                       win_data->fb_pitch / (win_data->bpp >> 3));
 
        /* setup display size */
        if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
@@ -961,6 +963,7 @@ static void mixer_win_mode_set(struct exynos_drm_crtc *crtc,
        win_data->fb_y = plane->fb_y;
        win_data->fb_width = plane->fb_width;
        win_data->fb_height = plane->fb_height;
+       win_data->fb_pitch = plane->pitch;
        win_data->src_width = plane->src_width;
        win_data->src_height = plane->src_height;
 
index 5b205863b6596d7fa72e6f465a017d9bcb204e78..27ea6bdebce761dbd8dca5340a4cd6e2bd32eaa3 100644 (file)
@@ -2737,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
        WARN_ON(i915_verify_lists(ring->dev));
 
-       /* Move any buffers on the active list that are no longer referenced
-        * by the ringbuffer to the flushing/inactive lists as appropriate,
-        * before we free the context associated with the requests.
+       /* Retire requests first as we use it above for the early return.
+        * If we retire requests last, we may use a later seqno and so clear
+        * the requests lists without clearing the active list, leading to
+        * confusion.
         */
-       while (!list_empty(&ring->active_list)) {
-               struct drm_i915_gem_object *obj;
-
-               obj = list_first_entry(&ring->active_list,
-                                     struct drm_i915_gem_object,
-                                     ring_list);
-
-               if (!i915_gem_request_completed(obj->last_read_req, true))
-                       break;
-
-               i915_gem_object_move_to_inactive(obj);
-       }
-
-
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
                struct intel_ringbuffer *ringbuf;
@@ -2789,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                i915_gem_free_request(request);
        }
 
+       /* Move any buffers on the active list that are no longer referenced
+        * by the ringbuffer to the flushing/inactive lists as appropriate,
+        * before we free the context associated with the requests.
+        */
+       while (!list_empty(&ring->active_list)) {
+               struct drm_i915_gem_object *obj;
+
+               obj = list_first_entry(&ring->active_list,
+                                     struct drm_i915_gem_object,
+                                     ring_list);
+
+               if (!i915_gem_request_completed(obj->last_read_req, true))
+                       break;
+
+               i915_gem_object_move_to_inactive(obj);
+       }
+
        if (unlikely(ring->trace_irq_req &&
                     i915_gem_request_completed(ring->trace_irq_req, true))) {
                ring->irq_put(ring);
index b773368fc62c8ac67717f6770ce20fd642a1bc8c..38a742532c4fa48ba2798d6aec910b546839090f 100644 (file)
@@ -1487,7 +1487,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto err;
        }
 
-       if (i915_needs_cmd_parser(ring)) {
+       if (i915_needs_cmd_parser(ring) && args->batch_len) {
                batch_obj = i915_gem_execbuffer_parse(ring,
                                                      &shadow_exec_entry,
                                                      eb,
index 6d22128d97b1b8ce732208fe93ed897dc0a2cdcd..f75173c20f47677f1a8462dabc78b10ac7b4645f 100644 (file)
@@ -2438,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
        if (!intel_crtc->base.primary->fb)
                return;
 
-       if (intel_alloc_plane_obj(intel_crtc, plane_config))
+       if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
+               struct drm_plane *primary = intel_crtc->base.primary;
+
+               primary->state->crtc = &intel_crtc->base;
+               primary->crtc = &intel_crtc->base;
+               update_state_fb(primary);
+
                return;
+       }
 
        kfree(intel_crtc->base.primary->fb);
        intel_crtc->base.primary->fb = NULL;
@@ -2462,11 +2469,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
                        continue;
 
                if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+                       struct drm_plane *primary = intel_crtc->base.primary;
+
                        if (obj->tiling_mode != I915_TILING_NONE)
                                dev_priv->preserve_bios_swizzle = true;
 
                        drm_framebuffer_reference(c->primary->fb);
-                       intel_crtc->base.primary->fb = c->primary->fb;
+                       primary->fb = c->primary->fb;
+                       primary->state->crtc = &intel_crtc->base;
+                       primary->crtc = &intel_crtc->base;
                        obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
                        break;
                }
@@ -6663,7 +6674,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                      plane_config->size);
 
        crtc->base.primary->fb = fb;
-       update_state_fb(crtc->base.primary);
 }
 
 static void chv_crtc_clock_get(struct intel_crtc *crtc,
@@ -7704,7 +7714,6 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
                      plane_config->size);
 
        crtc->base.primary->fb = fb;
-       update_state_fb(crtc->base.primary);
        return;
 
 error:
@@ -7798,7 +7807,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
                      plane_config->size);
 
        crtc->base.primary->fb = fb;
-       update_state_fb(crtc->base.primary);
 }
 
 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
index 0a52c44ad03d6b21078fe7482ddc3b84813ac9c4..9c5451c97942801544bbe9a0e692810bb5a9444e 100644 (file)
@@ -1322,7 +1322,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
        drm_modeset_lock_all(dev);
 
        plane = drm_plane_find(dev, set->plane_id);
-       if (!plane) {
+       if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
                ret = -ENOENT;
                goto out_unlock;
        }
@@ -1349,7 +1349,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
        drm_modeset_lock_all(dev);
 
        plane = drm_plane_find(dev, get->plane_id);
-       if (!plane) {
+       if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
                ret = -ENOENT;
                goto out_unlock;
        }
index c648e1996dabac449dfb838e018cad85b2d3bb61..243a36c93b8f96c0f268ef9bfcae1721043c7240 100644 (file)
 #define VCE_UENC_REG_CLOCK_GATING      0x207c0
 #define VCE_SYS_INT_EN                 0x21300
 #      define VCE_SYS_INT_TRAP_INTERRUPT_EN    (1 << 3)
+#define VCE_LMI_VCPU_CACHE_40BIT_BAR   0x2145c
 #define VCE_LMI_CTRL2                  0x21474
 #define VCE_LMI_CTRL                   0x21498
 #define VCE_LMI_VM_CTRL                        0x214a0
index 5587603b4a891c1f2cfcf7873dd8aa9e173907df..33d5a4f4eebdc21118b733957a2e1f75230a55cb 100644 (file)
@@ -1565,6 +1565,7 @@ struct radeon_dpm {
        int                     new_active_crtc_count;
        u32                     current_active_crtcs;
        int                     current_active_crtc_count;
+       bool single_display;
        struct radeon_dpm_dynamic_state dyn_state;
        struct radeon_dpm_fan fan;
        u32 tdp_limit;
index 63ccb8fa799c209bc82db257d3da0c8fc60ba052..d27e4ccb848c9c60e8a14f71336b790b125d2231 100644 (file)
@@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
 
 static bool radeon_read_bios(struct radeon_device *rdev)
 {
-       uint8_t __iomem *bios;
+       uint8_t __iomem *bios, val1, val2;
        size_t size;
 
        rdev->bios = NULL;
@@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev)
                return false;
        }
 
-       if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+       val1 = readb(&bios[0]);
+       val2 = readb(&bios[1]);
+
+       if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
                pci_unmap_rom(rdev->pdev, bios);
                return false;
        }
-       rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+       rdev->bios = kzalloc(size, GFP_KERNEL);
        if (rdev->bios == NULL) {
                pci_unmap_rom(rdev->pdev, bios);
                return false;
        }
+       memcpy_fromio(rdev->bios, bios, size);
        pci_unmap_rom(rdev->pdev, bios);
        return true;
 }
index a69bd441dd2d0cc612b9fa4d9e7711934270e3c5..572b4dbec186a9d59e8066782773c86acdbfa46b 100644 (file)
@@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
        it = interval_tree_iter_first(&rmn->objects, start, end);
        while (it) {
                struct radeon_bo *bo;
-               struct fence *fence;
                int r;
 
                bo = container_of(it, struct radeon_bo, mn_it);
@@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
                        continue;
                }
 
-               fence = reservation_object_get_excl(bo->tbo.resv);
-               if (fence) {
-                       r = radeon_fence_wait((struct radeon_fence *)fence, false);
-                       if (r)
-                               DRM_ERROR("(%d) failed to wait for user bo\n", r);
-               }
+               r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
+                       false, MAX_SCHEDULE_TIMEOUT);
+               if (r)
+                       DRM_ERROR("(%d) failed to wait for user bo\n", r);
 
                radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
index 33cf4108386dbba4ef70a0e372eb992d1ff7e4d3..c1ba83a8dd8c9333829aaa5f1384f65c7103d238 100644 (file)
@@ -837,12 +837,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
        radeon_pm_compute_clocks(rdev);
 }
 
-static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
-                                                    enum radeon_pm_state_type dpm_state)
+static bool radeon_dpm_single_display(struct radeon_device *rdev)
 {
-       int i;
-       struct radeon_ps *ps;
-       u32 ui_class;
        bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
                true : false;
 
@@ -858,6 +854,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
        if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
                single_display = false;
 
+       return single_display;
+}
+
+static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
+                                                    enum radeon_pm_state_type dpm_state)
+{
+       int i;
+       struct radeon_ps *ps;
+       u32 ui_class;
+       bool single_display = radeon_dpm_single_display(rdev);
+
        /* certain older asics have a separate 3D performance state,
         * so try that first if the user selected performance
         */
@@ -983,6 +990,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
        struct radeon_ps *ps;
        enum radeon_pm_state_type dpm_state;
        int ret;
+       bool single_display = radeon_dpm_single_display(rdev);
 
        /* if dpm init failed */
        if (!rdev->pm.dpm_enabled)
@@ -1007,6 +1015,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
                /* vce just modifies an existing state so force a change */
                if (ps->vce_active != rdev->pm.dpm.vce_active)
                        goto force;
+               /* user has made a display change (such as timing) */
+               if (rdev->pm.dpm.single_display != single_display)
+                       goto force;
                if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
                        /* for pre-BTC and APUs if the num crtcs changed but state is the same,
                         * all we need to do is update the display configuration.
@@ -1069,6 +1080,7 @@ force:
 
        rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
        rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+       rdev->pm.dpm.single_display = single_display;
 
        /* wait for the rings to drain */
        for (i = 0; i < RADEON_NUM_RINGS; i++) {
index 2456f69efd2310233fac5c75a314a9060abe1a17..8c7872339c2a6f5e94bf601c47ed421e5caee352 100644 (file)
@@ -495,7 +495,7 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
 
-       if (!ring->ready)
+       if (!ring->ring)
                return 0;
 
        /* print 8 dw before current rptr as often it's the last executed
index d02aa1d0f5885408c877056bd4ac1ab0e1ed6f12..b292aca0f342d53856ec3eaf982b71fd8b0a7fa8 100644 (file)
@@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 
+       /* double check that we don't free the table twice */
+       if (!ttm->sg->sgl)
+               return;
+
        /* free the sg table and pages again */
        dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
 
index 1ac7bb825a1b3bfecea601d76f8e33c2b8d581ff..fbbe78fbd087ae7c147a43b925f0ac0401c86466 100644 (file)
@@ -156,6 +156,9 @@ int vce_v2_0_resume(struct radeon_device *rdev)
        WREG32(VCE_LMI_SWAP_CNTL1, 0);
        WREG32(VCE_LMI_VM_CTRL, 0);
 
+       WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8);
+
+       addr &= 0xff;
        size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size);
        WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
        WREG32(VCE_VCPU_CACHE_SIZE0, size);
index 1096da327130526080139e628fdd0a0130d32861..75c6d2103e07adad3b11919687e81f8dd7a8fca3 100644 (file)
@@ -659,7 +659,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
 
        mutex_lock(&data->mutex);
 
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = bma180_get_data_reg(data, bit);
                if (ret < 0) {
index 066d0c04072c69943fa21313fb47f06c72fedcea..75567fd457dcc4b9bd7c147fdc4cb229cdbaf0c4 100644 (file)
@@ -168,14 +168,14 @@ static const struct {
        int val;
        int val2;
        u8 bw_bits;
-} bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08},
-                                    {15, 630000, 0x09},
-                                    {31, 250000, 0x0A},
-                                    {62, 500000, 0x0B},
-                                    {125, 0, 0x0C},
-                                    {250, 0, 0x0D},
-                                    {500, 0, 0x0E},
-                                    {1000, 0, 0x0F} };
+} bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
+                                    {31, 260000, 0x09},
+                                    {62, 500000, 0x0A},
+                                    {125, 0, 0x0B},
+                                    {250, 0, 0x0C},
+                                    {500, 0, 0x0D},
+                                    {1000, 0, 0x0E},
+                                    {2000, 0, 0x0F} };
 
 static const struct {
        int bw_bits;
@@ -840,7 +840,7 @@ static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
 }
 
 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
-               "7.810000 15.630000 31.250000 62.500000 125 250 500 1000");
+               "15.620000 31.260000 62.50000 125 250 500 1000 2000");
 
 static struct attribute *bmc150_accel_attributes[] = {
        &iio_const_attr_sampling_frequency_available.dev_attr.attr,
@@ -986,7 +986,7 @@ static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
        int bit, ret, i = 0;
 
        mutex_lock(&data->mutex);
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = i2c_smbus_read_word_data(data->client,
                                               BMC150_ACCEL_AXIS_TO_REG(bit));
index 567de269cc00650191541a98ac8d5818d0ea8661..1a6379525fa47e73497b17866be4276fc88c8065 100644 (file)
@@ -956,7 +956,7 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
 
        mutex_lock(&data->mutex);
 
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = kxcjk1013_get_acc_reg(data, bit);
                if (ret < 0) {
index 202daf889be276315d24575b1301349f295fe4c8..46379b1fb25b59b10018a121b2cc8dc78082d4a6 100644 (file)
@@ -137,7 +137,8 @@ config AXP288_ADC
 
 config CC10001_ADC
        tristate "Cosmic Circuits 10001 ADC driver"
-       depends on HAS_IOMEM || HAVE_CLK || REGULATOR
+       depends on HAVE_CLK || REGULATOR
+       depends on HAS_IOMEM
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
index ff61ae55dd3ff8ac73c0925c0b88fa6a75a1083d..8a0eb4a04fb55b9cb2436db5b16f678f654b8a26 100644 (file)
@@ -544,7 +544,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
 {
        struct iio_dev *idev = iio_trigger_get_drvdata(trig);
        struct at91_adc_state *st = iio_priv(idev);
-       struct iio_buffer *buffer = idev->buffer;
        struct at91_adc_reg_desc *reg = st->registers;
        u32 status = at91_adc_readl(st, reg->trigger_register);
        int value;
@@ -564,7 +563,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
                at91_adc_writel(st, reg->trigger_register,
                                status | value);
 
-               for_each_set_bit(bit, buffer->scan_mask,
+               for_each_set_bit(bit, idev->active_scan_mask,
                                 st->num_channels) {
                        struct iio_chan_spec const *chan = idev->channels + bit;
                        at91_adc_writel(st, AT91_ADC_CHER,
@@ -579,7 +578,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
                at91_adc_writel(st, reg->trigger_register,
                                status & ~value);
 
-               for_each_set_bit(bit, buffer->scan_mask,
+               for_each_set_bit(bit, idev->active_scan_mask,
                                 st->num_channels) {
                        struct iio_chan_spec const *chan = idev->channels + bit;
                        at91_adc_writel(st, AT91_ADC_CHDR,
index 2e5cc4409f78884e82f309c729febabe3ff7f982..a0e7161f040c91daca74a469d27ff641be9ea915 100644 (file)
@@ -188,12 +188,11 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
 static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
 {
        struct tiadc_device *adc_dev = iio_priv(indio_dev);
-       struct iio_buffer *buffer = indio_dev->buffer;
        unsigned int enb = 0;
        u8 bit;
 
        tiadc_step_config(indio_dev);
-       for_each_set_bit(bit, buffer->scan_mask, adc_dev->channels)
+       for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels)
                enb |= (get_adc_step_bit(adc_dev, bit) << 1);
        adc_dev->buffer_en_ch_steps = enb;
 
index 8ec353c01d98e02e7074df9d4d295ec19f772f09..e63b8e76d4c3d54edc25d23561f28a11afeb05e7 100644 (file)
@@ -141,9 +141,13 @@ struct vf610_adc {
        struct regulator *vref;
        struct vf610_adc_feature adc_feature;
 
+       u32 sample_freq_avail[5];
+
        struct completion completion;
 };
 
+static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
+
 #define VF610_ADC_CHAN(_idx, _chan_type) {                     \
        .type = (_chan_type),                                   \
        .indexed = 1,                                           \
@@ -180,35 +184,47 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = {
        /* sentinel */
 };
 
-/*
- * ADC sample frequency, unit is ADCK cycles.
- * ADC clk source is ipg clock, which is the same as bus clock.
- *
- * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
- * SFCAdder: fixed to 6 ADCK cycles
- * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
- * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
- * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
- *
- * By default, enable 12 bit resolution mode, clock source
- * set to ipg clock, So get below frequency group:
- */
-static const u32 vf610_sample_freq_avail[5] =
-{1941176, 559332, 286957, 145374, 73171};
+static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
+{
+       unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk);
+       int i;
+
+       /*
+        * Calculate ADC sample frequencies
+        * Sample time unit is ADCK cycles. ADCK clk source is ipg clock,
+        * which is the same as bus clock.
+        *
+        * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
+        * SFCAdder: fixed to 6 ADCK cycles
+        * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
+        * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
+        * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
+        */
+       adck_rate = ipg_rate / info->adc_feature.clk_div;
+       for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
+               info->sample_freq_avail[i] =
+                       adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3));
+}
 
 static inline void vf610_adc_cfg_init(struct vf610_adc *info)
 {
+       struct vf610_adc_feature *adc_feature = &info->adc_feature;
+
        /* set default Configuration for ADC controller */
-       info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET;
-       info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET;
+       adc_feature->clk_sel = VF610_ADCIOC_BUSCLK_SET;
+       adc_feature->vol_ref = VF610_ADCIOC_VR_VREF_SET;
+
+       adc_feature->calibration = true;
+       adc_feature->ovwren = true;
+
+       adc_feature->res_mode = 12;
+       adc_feature->sample_rate = 1;
+       adc_feature->lpm = true;
 
-       info->adc_feature.calibration = true;
-       info->adc_feature.ovwren = true;
+       /* Use a safe ADCK which is below 20MHz on all devices */
+       adc_feature->clk_div = 8;
 
-       info->adc_feature.clk_div = 1;
-       info->adc_feature.res_mode = 12;
-       info->adc_feature.sample_rate = 1;
-       info->adc_feature.lpm = true;
+       vf610_adc_calculate_rates(info);
 }
 
 static void vf610_adc_cfg_post_set(struct vf610_adc *info)
@@ -290,12 +306,10 @@ static void vf610_adc_cfg_set(struct vf610_adc *info)
 
        cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
 
-       /* low power configuration */
        cfg_data &= ~VF610_ADC_ADLPC_EN;
        if (adc_feature->lpm)
                cfg_data |= VF610_ADC_ADLPC_EN;
 
-       /* disable high speed */
        cfg_data &= ~VF610_ADC_ADHSC_EN;
 
        writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
@@ -435,10 +449,27 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171");
+static ssize_t vf610_show_samp_freq_avail(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct vf610_adc *info = iio_priv(dev_to_iio_dev(dev));
+       size_t len = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(info->sample_freq_avail); i++)
+               len += scnprintf(buf + len, PAGE_SIZE - len,
+                       "%u ", info->sample_freq_avail[i]);
+
+       /* replace trailing space by newline */
+       buf[len - 1] = '\n';
+
+       return len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(vf610_show_samp_freq_avail);
 
 static struct attribute *vf610_attributes[] = {
-       &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+       &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
        NULL
 };
 
@@ -502,7 +533,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
                return IIO_VAL_FRACTIONAL_LOG2;
 
        case IIO_CHAN_INFO_SAMP_FREQ:
-               *val = vf610_sample_freq_avail[info->adc_feature.sample_rate];
+               *val = info->sample_freq_avail[info->adc_feature.sample_rate];
                *val2 = 0;
                return IIO_VAL_INT;
 
@@ -525,9 +556,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
        switch (mask) {
                case IIO_CHAN_INFO_SAMP_FREQ:
                        for (i = 0;
-                               i < ARRAY_SIZE(vf610_sample_freq_avail);
+                               i < ARRAY_SIZE(info->sample_freq_avail);
                                i++)
-                               if (val == vf610_sample_freq_avail[i]) {
+                               if (val == info->sample_freq_avail[i]) {
                                        info->adc_feature.sample_rate = i;
                                        vf610_adc_sample_set(info);
                                        return 0;
index 60451b32824212a039b8ac9358751faa1af12819..ccf3ea7e1afa8ca1848937b9a8b92e1f2cefc8be 100644 (file)
@@ -822,7 +822,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
        int bit, ret, i = 0;
 
        mutex_lock(&data->mutex);
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = i2c_smbus_read_word_data(data->client,
                                               BMG160_AXIS_TO_REG(bit));
index e0017c22bb9c6ce3b4f4f9c753a8ce1364621b19..f53e9a803a0e1ec1589a5b0ad25d7aa1f8d54e70 100644 (file)
@@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
        iio_trigger_set_drvdata(adis->trig, adis);
        ret = iio_trigger_register(adis->trig);
 
-       indio_dev->trig = adis->trig;
+       indio_dev->trig = iio_trigger_get(adis->trig);
        if (ret)
                goto error_free_irq;
 
index d8d5bed65e072cae577968edb78e2e592c2a5bfa..ef76afe2643cb0bebe512124ca8c9326e09229c2 100644 (file)
@@ -410,42 +410,46 @@ error_read_raw:
        }
 }
 
-static int inv_mpu6050_write_fsr(struct inv_mpu6050_state *st, int fsr)
+static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
 {
-       int result;
+       int result, i;
        u8 d;
 
-       if (fsr < 0 || fsr > INV_MPU6050_MAX_GYRO_FS_PARAM)
-               return -EINVAL;
-       if (fsr == st->chip_config.fsr)
-               return 0;
+       for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
+               if (gyro_scale_6050[i] == val) {
+                       d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
+                       result = inv_mpu6050_write_reg(st,
+                                       st->reg->gyro_config, d);
+                       if (result)
+                               return result;
 
-       d = (fsr << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
-       result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
-       if (result)
-               return result;
-       st->chip_config.fsr = fsr;
+                       st->chip_config.fsr = i;
+                       return 0;
+               }
+       }
 
-       return 0;
+       return -EINVAL;
 }
 
-static int inv_mpu6050_write_accel_fs(struct inv_mpu6050_state *st, int fs)
+static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
 {
-       int result;
+       int result, i;
        u8 d;
 
-       if (fs < 0 || fs > INV_MPU6050_MAX_ACCL_FS_PARAM)
-               return -EINVAL;
-       if (fs == st->chip_config.accl_fs)
-               return 0;
+       for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
+               if (accel_scale[i] == val) {
+                       d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
+                       result = inv_mpu6050_write_reg(st,
+                                       st->reg->accl_config, d);
+                       if (result)
+                               return result;
 
-       d = (fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
-       result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
-       if (result)
-               return result;
-       st->chip_config.accl_fs = fs;
+                       st->chip_config.accl_fs = i;
+                       return 0;
+               }
+       }
 
-       return 0;
+       return -EINVAL;
 }
 
 static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
@@ -471,10 +475,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_SCALE:
                switch (chan->type) {
                case IIO_ANGL_VEL:
-                       result = inv_mpu6050_write_fsr(st, val);
+                       result = inv_mpu6050_write_gyro_scale(st, val2);
                        break;
                case IIO_ACCEL:
-                       result = inv_mpu6050_write_accel_fs(st, val);
+                       result = inv_mpu6050_write_accel_scale(st, val2);
                        break;
                default:
                        result = -EINVAL;
index 0cd306a72a6e347391ea97d7a02d05e54b6c64ab..ba27e277511fc52585f8fa25fd20d49bca61be96 100644 (file)
 #include <linux/poll.h>
 #include "inv_mpu_iio.h"
 
+static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+{
+       unsigned long flags;
+
+       /* take the spin lock sem to avoid interrupt kick in */
+       spin_lock_irqsave(&st->time_stamp_lock, flags);
+       kfifo_reset(&st->timestamps);
+       spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+}
+
 int inv_reset_fifo(struct iio_dev *indio_dev)
 {
        int result;
@@ -50,6 +60,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
                                        INV_MPU6050_BIT_FIFO_RST);
        if (result)
                goto reset_fifo_fail;
+
+       /* clear timestamps fifo */
+       inv_clear_kfifo(st);
+
        /* enable interrupt */
        if (st->chip_config.accl_fifo_enable ||
            st->chip_config.gyro_fifo_enable) {
@@ -83,16 +97,6 @@ reset_fifo_fail:
        return result;
 }
 
-static void inv_clear_kfifo(struct inv_mpu6050_state *st)
-{
-       unsigned long flags;
-
-       /* take the spin lock sem to avoid interrupt kick in */
-       spin_lock_irqsave(&st->time_stamp_lock, flags);
-       kfifo_reset(&st->timestamps);
-       spin_unlock_irqrestore(&st->time_stamp_lock, flags);
-}
-
 /**
  * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
  */
@@ -184,7 +188,6 @@ end_session:
 flush_fifo:
        /* Flush HW and SW FIFOs. */
        inv_reset_fifo(indio_dev);
-       inv_clear_kfifo(st);
        mutex_unlock(&indio_dev->mlock);
        iio_trigger_notify_done(indio_dev->trig);
 
index 5cc3692acf377664dbf255a698bb5e64606fe864..b3a36376c719317006cf8b9d30e1369f8b6af8fb 100644 (file)
@@ -1227,7 +1227,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p)
                base = KMX61_MAG_XOUT_L;
 
        mutex_lock(&data->lock);
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = kmx61_read_measurement(data, base, bit);
                if (ret < 0) {
index aaba9d3d980ee623ad6b78111a9c9708198b23e0..4df97f650e448e80053e037fed6d5e208493c687 100644 (file)
@@ -847,8 +847,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
  * @attr_list: List of IIO device attributes
  *
  * This function frees the memory allocated for each of the IIO device
- * attributes in the list. Note: if you want to reuse the list after calling
- * this function you have to reinitialize it using INIT_LIST_HEAD().
+ * attributes in the list.
  */
 void iio_free_chan_devattr_list(struct list_head *attr_list)
 {
@@ -856,6 +855,7 @@ void iio_free_chan_devattr_list(struct list_head *attr_list)
 
        list_for_each_entry_safe(p, n, attr_list, l) {
                kfree(p->dev_attr.attr.name);
+               list_del(&p->l);
                kfree(p);
        }
 }
@@ -936,6 +936,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
 
        iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
        kfree(indio_dev->chan_attr_group.attrs);
+       indio_dev->chan_attr_group.attrs = NULL;
 }
 
 static void iio_dev_release(struct device *device)
index a4b397048f71f9fe2e22be46f45fca0fb06cb8ff..a99692ba91bc75fb3186f69f2d55bfc9efd6652c 100644 (file)
@@ -500,6 +500,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
 error_free_setup_event_lines:
        iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
        kfree(indio_dev->event_interface);
+       indio_dev->event_interface = NULL;
        return ret;
 }
 
index 74dff4e4a11acdda1ec44ec6eaec75ef4d0543e4..89fca3a7075039b9b9a0de5514c1b96758d4593b 100644 (file)
@@ -494,7 +494,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
 
        mutex_lock(&data->mutex);
 
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = sx9500_read_proximity(data, &indio_dev->channels[bit],
                                            &val);
index aec7a6aa2951db47bc6b5be969a29d1867688b23..8c014b5dab4c82ff805a744c89655555a56094fc 100644 (file)
@@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        if (dmasync)
                dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
+       /*
+        * If the combination of the addr and size requested for this memory
+        * region causes an integer overflow, return error.
+        */
+       if ((PAGE_ALIGN(addr + size) <= size) ||
+           (PAGE_ALIGN(addr + size) <= addr))
+               return ERR_PTR(-EINVAL);
+
        if (!can_do_mlock())
                return ERR_PTR(-EPERM);
 
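The check added above rejects addr/size combinations whose page-aligned end wraps around the address space. A standalone sketch of the same test follows; the 4 KiB page size and the input values are assumed for illustration.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Illustrative only: the wrap-around test the ib_umem_get() hunk adds,
 * exercised with made-up inputs that overflow 64-bit arithmetic. */
static int addr_size_overflows(uint64_t addr, uint64_t size)
{
	return PAGE_ALIGN(addr + size) <= size ||
	       PAGE_ALIGN(addr + size) <= addr;
}

int main(void)
{
	uint64_t addr = 0xfffffffffffff000ULL;  /* assumed example address */
	uint64_t size = 0x2000;                 /* addr + size wraps past zero */

	printf("overflow case: %d\n", addr_size_overflows(addr, size));
	printf("normal case  : %d\n", addr_size_overflows(0x10000, 0x2000));
	return 0;
}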
index 1bd15ebc01f2df5002eca38f7089a61701471f5d..33198b91bebfdcf622ac355bc599d97e43441b08 100644 (file)
@@ -2281,10 +2281,12 @@ static int alps_set_protocol(struct psmouse *psmouse,
                priv->set_abs_params = alps_set_abs_params_mt;
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-               priv->x_max = 1360;
-               priv->y_max = 660;
                priv->x_bits = 23;
                priv->y_bits = 12;
+
+               if (alps_dolphin_get_device_area(psmouse, priv))
+                       return -EIO;
+
                break;
 
        case ALPS_PROTO_V6:
@@ -2303,9 +2305,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
                priv->set_abs_params = alps_set_abs_params_mt;
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-
-               if (alps_dolphin_get_device_area(psmouse, priv))
-                       return -EIO;
+               priv->x_max = 0xfff;
+               priv->y_max = 0x7ff;
 
                if (priv->fw_ver[1] != 0xba)
                        priv->flags |= ALPS_BUTTONPAD;
index dda605836546847afbdbdd4c77c976134a914a2e..3b06c8a360b661f02ed3aa826e5a996b93e5b358 100644 (file)
@@ -152,6 +152,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                {ANY_BOARD_ID, ANY_BOARD_ID},
                1024, 5022, 2508, 4832
        },
+       {
+               (const char * const []){"LEN2006", NULL},
+               {2691, 2691},
+               1024, 5045, 2457, 4832
+       },
        {
                (const char * const []){"LEN2006", NULL},
                {ANY_BOARD_ID, ANY_BOARD_ID},
@@ -189,7 +194,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN2003",
        "LEN2004", /* L440 */
        "LEN2005",
-       "LEN2006",
+       "LEN2006", /* Edge E440/E540 */
        "LEN2007",
        "LEN2008",
        "LEN2009",
index fc13dd56953e1eb2cb25107ef017336546b733a7..a3adde6519f0a24b7150f8e69ceabed534ceaa38 100644 (file)
@@ -1288,10 +1288,13 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
                return 0;
 
        spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-       if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS)
+       if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
+                       smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                ret = arm_smmu_iova_to_phys_hard(domain, iova);
-       else
+       } else {
                ret = ops->iova_to_phys(ops, iova);
+       }
+
        spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
 
        return ret;
@@ -1556,7 +1559,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                return -ENODEV;
        }
 
-       if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) {
+       if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
                dev_notice(smmu->dev, "\taddress translation ops\n");
        }
index ae4c1a854e57896fc64e33668369bebc23f70945..2d1e05bdbb53f5901035294a71c65231b004a338 100644 (file)
@@ -1742,9 +1742,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
 static void domain_exit(struct dmar_domain *domain)
 {
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
        struct page *freelist = NULL;
+       int i;
 
        /* Domain 0 is reserved, so dont process it */
        if (!domain)
@@ -1764,8 +1763,8 @@ static void domain_exit(struct dmar_domain *domain)
 
        /* clear attached or cached domains */
        rcu_read_lock();
-       for_each_active_iommu(iommu, drhd)
-               iommu_detach_domain(domain, iommu);
+       for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
+               iommu_detach_domain(domain, g_iommus[i]);
        rcu_read_unlock();
 
        dma_free_pagelist(freelist);
index 10186cac7716e246ea8b8a6e915bf07970670f5b..bc39bdf7b99bf170d793965e28777d3c7a279537 100644 (file)
@@ -851,6 +851,7 @@ static int ipmmu_remove(struct platform_device *pdev)
 
 static const struct of_device_id ipmmu_of_ids[] = {
        { .compatible = "renesas,ipmmu-vmsa", },
+       { }
 };
 
 static struct platform_driver ipmmu_driver = {
index 596b0a9eee99a9f2ea1beaac726bddc4069c4937..9687f8afebffbb865256ba6677663e6c76702aa1 100644 (file)
@@ -169,7 +169,7 @@ static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
 
 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
 {
-       cmd->raw_cmd[0] &= ~(0xffffUL << 32);
+       cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
        cmd->raw_cmd[0] |= ((u64)devid) << 32;
 }
 
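The hunk above widens the mask used when encoding the device ID: ~(0xffffUL << 32) cleared only bits 32-47, so stale bits 48-63 could survive into the command word, whereas BIT_ULL(32) - 1 keeps just the low 32 bits before the devid is inserted at bit 32. A small sketch contrasting the two masks on an assumed stale value:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

/* Illustrative only: the command word and devid are made-up examples. */
int main(void)
{
	uint64_t raw = 0xdeadbeefcafef00dULL;  /* assumed stale command word */
	uint32_t devid = 0x12345;

	/* Old mask cleared only bits 32..47, leaving bits 48..63 stale. */
	uint64_t old_word = (raw & ~(0xffffULL << 32)) | ((uint64_t)devid << 32);
	/* New mask keeps just the low 32 bits before inserting the devid. */
	uint64_t new_word = (raw & (BIT_ULL(32) - 1)) | ((uint64_t)devid << 32);

	printf("old: %016llx\n", (unsigned long long)old_word);
	printf("new: %016llx\n", (unsigned long long)new_word);
	return 0;
}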
@@ -802,6 +802,7 @@ static int its_alloc_tables(struct its_node *its)
        int i;
        int psz = SZ_64K;
        u64 shr = GITS_BASER_InnerShareable;
+       u64 cache = GITS_BASER_WaWb;
 
        for (i = 0; i < GITS_BASER_NR_REGS; i++) {
                u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -848,7 +849,7 @@ retry_baser:
                val = (virt_to_phys(base)                                |
                       (type << GITS_BASER_TYPE_SHIFT)                   |
                       ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
-                      GITS_BASER_WaWb                                   |
+                      cache                                             |
                       shr                                               |
                       GITS_BASER_VALID);
 
@@ -874,9 +875,12 @@ retry_baser:
                         * Shareability didn't stick. Just use
                         * whatever the read reported, which is likely
                         * to be the only thing this redistributor
-                        * supports.
+                        * supports. If that's zero, make it
+                        * non-cacheable as well.
                         */
                        shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+                       if (!shr)
+                               cache = GITS_BASER_nC;
                        goto retry_baser;
                }
 
@@ -980,16 +984,39 @@ static void its_cpu_init_lpis(void)
        tmp = readq_relaxed(rbase + GICR_PROPBASER);
 
        if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
+               if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
+                       /*
+                        * The HW reports non-shareable, we must
+                        * remove the cacheability attributes as
+                        * well.
+                        */
+                       val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
+                                GICR_PROPBASER_CACHEABILITY_MASK);
+                       val |= GICR_PROPBASER_nC;
+                       writeq_relaxed(val, rbase + GICR_PROPBASER);
+               }
                pr_info_once("GIC: using cache flushing for LPI property table\n");
                gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
        }
 
        /* set PENDBASE */
        val = (page_to_phys(pend_page) |
-              GICR_PROPBASER_InnerShareable |
-              GICR_PROPBASER_WaWb);
+              GICR_PENDBASER_InnerShareable |
+              GICR_PENDBASER_WaWb);
 
        writeq_relaxed(val, rbase + GICR_PENDBASER);
+       tmp = readq_relaxed(rbase + GICR_PENDBASER);
+
+       if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
+               /*
+                * The HW reports non-shareable, we must remove the
+                * cacheability attributes as well.
+                */
+               val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
+                        GICR_PENDBASER_CACHEABILITY_MASK);
+               val |= GICR_PENDBASER_nC;
+               writeq_relaxed(val, rbase + GICR_PENDBASER);
+       }
 
        /* Enable LPIs */
        val = readl_relaxed(rbase + GICR_CTLR);
@@ -1026,7 +1053,7 @@ static void its_cpu_init_collection(void)
                         * This ITS wants a linear CPU number.
                         */
                        target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
-                       target = GICR_TYPER_CPU_NUMBER(target);
+                       target = GICR_TYPER_CPU_NUMBER(target) << 16;
                }
 
                /* Perform collection mapping */
@@ -1422,14 +1449,26 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
 
        writeq_relaxed(baser, its->base + GITS_CBASER);
        tmp = readq_relaxed(its->base + GITS_CBASER);
-       writeq_relaxed(0, its->base + GITS_CWRITER);
-       writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
 
-       if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
+       if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
+               if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
+                       /*
+                        * The HW reports non-shareable, we must
+                        * remove the cacheability attributes as
+                        * well.
+                        */
+                       baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
+                                  GITS_CBASER_CACHEABILITY_MASK);
+                       baser |= GITS_CBASER_nC;
+                       writeq_relaxed(baser, its->base + GITS_CBASER);
+               }
                pr_info("ITS: using cache flushing for cmd queue\n");
                its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
        }
 
+       writeq_relaxed(0, its->base + GITS_CWRITER);
+       writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
+
        if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
                its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
                if (!its->domain) {
index ee035ec4526bd802a89de51c6edc099d8bbb9747..169172d2ba05c8b4187b9151975dcd1be52043c2 100644 (file)
@@ -1,6 +1,6 @@
 config LGUEST
        tristate "Linux hypervisor example code"
-       depends on X86_32 && EVENTFD && TTY
+       depends on X86_32 && EVENTFD && TTY && PCI_DIRECT
        select HVC_DRIVER
        ---help---
          This is a very simple module which allows you to run
index 9b641b38b857106000c6645035e7490c943f9233..8001fe9e3434734ad92c8109ef8fa860906238ce 100644 (file)
@@ -433,7 +433,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 
        dm_get(md);
        atomic_inc(&md->open_count);
-
 out:
        spin_unlock(&_minor_lock);
 
@@ -442,16 +441,20 @@ out:
 
 static void dm_blk_close(struct gendisk *disk, fmode_t mode)
 {
-       struct mapped_device *md = disk->private_data;
+       struct mapped_device *md;
 
        spin_lock(&_minor_lock);
 
+       md = disk->private_data;
+       if (WARN_ON(!md))
+               goto out;
+
        if (atomic_dec_and_test(&md->open_count) &&
            (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
                queue_work(deferred_remove_workqueue, &deferred_remove_work);
 
        dm_put(md);
-
+out:
        spin_unlock(&_minor_lock);
 }
 
@@ -2241,7 +2244,6 @@ static void free_dev(struct mapped_device *md)
        int minor = MINOR(disk_devt(md->disk));
 
        unlock_fs(md);
-       bdput(md->bdev);
        destroy_workqueue(md->wq);
 
        if (md->kworker_task)
@@ -2252,19 +2254,22 @@ static void free_dev(struct mapped_device *md)
                mempool_destroy(md->rq_pool);
        if (md->bs)
                bioset_free(md->bs);
-       blk_integrity_unregister(md->disk);
-       del_gendisk(md->disk);
+
        cleanup_srcu_struct(&md->io_barrier);
        free_table_devices(&md->table_devices);
-       free_minor(minor);
+       dm_stats_cleanup(&md->stats);
 
        spin_lock(&_minor_lock);
        md->disk->private_data = NULL;
        spin_unlock(&_minor_lock);
-
+       if (blk_get_integrity(md->disk))
+               blk_integrity_unregister(md->disk);
+       del_gendisk(md->disk);
        put_disk(md->disk);
        blk_cleanup_queue(md->queue);
-       dm_stats_cleanup(&md->stats);
+       bdput(md->bdev);
+       free_minor(minor);
+
        module_put(THIS_MODULE);
        kfree(md);
 }
@@ -2642,8 +2647,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 
        might_sleep();
 
-       spin_lock(&_minor_lock);
        map = dm_get_live_table(md, &srcu_idx);
+
+       spin_lock(&_minor_lock);
        idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
        set_bit(DMF_FREEING, &md->flags);
        spin_unlock(&_minor_lock);
index f38ec424872e362b934aaa6edd2fb21358bcb326..5615522f8d628b0f9d4b43d240d18d7bad02c835 100644 (file)
@@ -739,7 +739,7 @@ static int __init kempld_init(void)
                for (id = kempld_dmi_table;
                     id->matches[0].slot != DMI_NONE; id++)
                        if (strstr(id->ident, force_device_id))
-                               if (id->callback && id->callback(id))
+                               if (id->callback && !id->callback(id))
                                        break;
                if (id->matches[0].slot == DMI_NONE)
                        return -ENODEV;
index ede50244f265b14d950fba0448652960304fa971..dbd907d7170ebe990cb63fede374624ea03b6fd4 100644 (file)
@@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
 int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
 {
        u16 value;
+       u8 *buf;
+       int ret;
 
        if (!data)
                return -EINVAL;
-       *data = 0;
+
+       buf = kzalloc(sizeof(u8), GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
 
        addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
        value = swab16(addr);
 
-       return usb_control_msg(ucr->pusb_dev,
+       ret = usb_control_msg(ucr->pusb_dev,
                        usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
                        USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-                       value, 0, data, 1, 100);
+                       value, 0, buf, 1, 100);
+       *data = *buf;
+
+       kfree(buf);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
 
@@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
 int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
 {
        int ret;
+       u16 *buf;
 
        if (!status)
                return -EINVAL;
 
-       if (polling_pipe == 0)
+       if (polling_pipe == 0) {
+               buf = kzalloc(sizeof(u16), GFP_KERNEL);
+               if (!buf)
+                       return -ENOMEM;
+
                ret = usb_control_msg(ucr->pusb_dev,
                                usb_rcvctrlpipe(ucr->pusb_dev, 0),
                                RTSX_USB_REQ_POLL,
                                USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-                               0, 0, status, 2, 100);
-       else
+                               0, 0, buf, 2, 100);
+               *status = *buf;
+
+               kfree(buf);
+       } else {
                ret = rtsx_usb_get_status_with_bulk(ucr, status);
+       }
 
        /* usb_control_msg may return positive when success */
        if (ret < 0)
index b979c265fc51d0a09c48ff4216a8b055ebcd1c26..089a4028859d121d5611d04dde0139196753fb00 100644 (file)
@@ -3850,7 +3850,8 @@ static inline int bond_slave_override(struct bonding *bond,
        /* Find out if any slaves have the same mapping as this skb. */
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (slave->queue_id == skb->queue_mapping) {
-                       if (bond_slave_can_tx(slave)) {
+                       if (bond_slave_is_up(slave) &&
+                           slave->link == BOND_LINK_UP) {
                                bond_dev_queue_xmit(bond, skb, slave->dev);
                                return 0;
                        }
index 80c46ad4cee439d2015b3ee7de66d578f54afd0b..ad0a7e8c2c2bdf33626824645a8180d8e6d900ff 100644 (file)
@@ -592,13 +592,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
                rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
                           CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
                new_state = max(tx_state, rx_state);
-       } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) {
+       } else {
                __flexcan_get_berr_counter(dev, &bec);
-               new_state = CAN_STATE_ERROR_PASSIVE;
+               new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
+                           CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
                rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
                tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
-       } else {
-               new_state = CAN_STATE_BUS_OFF;
        }
 
        /* state hasn't changed */
@@ -1158,12 +1157,19 @@ static int flexcan_probe(struct platform_device *pdev)
        const struct flexcan_devtype_data *devtype_data;
        struct net_device *dev;
        struct flexcan_priv *priv;
+       struct regulator *reg_xceiver;
        struct resource *mem;
        struct clk *clk_ipg = NULL, *clk_per = NULL;
        void __iomem *base;
        int err, irq;
        u32 clock_freq = 0;
 
+       reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+       if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+       else if (IS_ERR(reg_xceiver))
+               reg_xceiver = NULL;
+
        if (pdev->dev.of_node)
                of_property_read_u32(pdev->dev.of_node,
                                                "clock-frequency", &clock_freq);
@@ -1224,9 +1230,7 @@ static int flexcan_probe(struct platform_device *pdev)
        priv->pdata = dev_get_platdata(&pdev->dev);
        priv->devtype_data = devtype_data;
 
-       priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
-       if (IS_ERR(priv->reg_xceiver))
-               priv->reg_xceiver = NULL;
+       priv->reg_xceiver = reg_xceiver;
 
        netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
 
index 009acc8641fc557cb580cb688983daf041519e4b..8b4d3e6875eb17e6bca38c812132953d0c5ce2c2 100644 (file)
@@ -901,6 +901,8 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
        }
 
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
        init_usb_anchor(&dev->rx_submitted);
 
        atomic_set(&dev->active_channels, 0);
index e97a08ce0b90c298577fed09c96dc7a711076e5e..57611fd91229a4580987b3519d56b8913f185411 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 
-#define MAX_TX_URBS                    16
 #define MAX_RX_URBS                    4
 #define START_TIMEOUT                  1000 /* msecs */
 #define STOP_TIMEOUT                   1000 /* msecs */
@@ -443,6 +442,7 @@ struct kvaser_usb_error_summary {
        };
 };
 
+/* Context for an outstanding, not yet ACKed, transmission */
 struct kvaser_usb_tx_urb_context {
        struct kvaser_usb_net_priv *priv;
        u32 echo_index;
@@ -456,8 +456,13 @@ struct kvaser_usb {
        struct usb_endpoint_descriptor *bulk_in, *bulk_out;
        struct usb_anchor rx_submitted;
 
+       /* @max_tx_urbs: Firmware-reported maximum number of outstanding,
+        * not yet ACKed, transmissions on this device. This value is
+        * also used as a sentinel for marking free tx contexts.
+        */
        u32 fw_version;
        unsigned int nchannels;
+       unsigned int max_tx_urbs;
        enum kvaser_usb_family family;
 
        bool rxinitdone;
@@ -467,19 +472,18 @@ struct kvaser_usb {
 
 struct kvaser_usb_net_priv {
        struct can_priv can;
-
-       spinlock_t tx_contexts_lock;
-       int active_tx_contexts;
-       struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
-
-       struct usb_anchor tx_submitted;
-       struct completion start_comp, stop_comp;
+       struct can_berr_counter bec;
 
        struct kvaser_usb *dev;
        struct net_device *netdev;
        int channel;
 
-       struct can_berr_counter bec;
+       struct completion start_comp, stop_comp;
+       struct usb_anchor tx_submitted;
+
+       spinlock_t tx_contexts_lock;
+       int active_tx_contexts;
+       struct kvaser_usb_tx_urb_context tx_contexts[];
 };
 
 static const struct usb_device_id kvaser_usb_table[] = {
@@ -592,8 +596,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
                         * for further details.
                         */
                        if (tmp->len == 0) {
-                               pos = round_up(pos,
-                                              dev->bulk_in->wMaxPacketSize);
+                               pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+                                                               wMaxPacketSize));
                                continue;
                        }
 
@@ -657,9 +661,13 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
        switch (dev->family) {
        case KVASER_LEAF:
                dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
+               dev->max_tx_urbs =
+                       le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
                break;
        case KVASER_USBCAN:
                dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
+               dev->max_tx_urbs =
+                       le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
                break;
        }
 
@@ -715,7 +723,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
 
        stats = &priv->netdev->stats;
 
-       context = &priv->tx_contexts[tid % MAX_TX_URBS];
+       context = &priv->tx_contexts[tid % dev->max_tx_urbs];
 
        /* Sometimes the state change doesn't come after a bus-off event */
        if (priv->can.restart_ms &&
@@ -744,7 +752,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
        spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
        can_get_echo_skb(priv->netdev, context->echo_index);
-       context->echo_index = MAX_TX_URBS;
+       context->echo_index = dev->max_tx_urbs;
        --priv->active_tx_contexts;
        netif_wake_queue(priv->netdev);
 
@@ -1329,7 +1337,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
                 * number of events in case of a heavy rx load on the bus.
                 */
                if (msg->len == 0) {
-                       pos = round_up(pos, dev->bulk_in->wMaxPacketSize);
+                       pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+                                                       wMaxPacketSize));
                        continue;
                }
 
@@ -1512,11 +1521,13 @@ error:
 
 static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
 {
-       int i;
+       int i, max_tx_urbs;
+
+       max_tx_urbs = priv->dev->max_tx_urbs;
 
        priv->active_tx_contexts = 0;
-       for (i = 0; i < MAX_TX_URBS; i++)
-               priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+       for (i = 0; i < max_tx_urbs; i++)
+               priv->tx_contexts[i].echo_index = max_tx_urbs;
 }
 
 /* This method might sleep. Do not call it in the atomic context
@@ -1702,14 +1713,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
 
        spin_lock_irqsave(&priv->tx_contexts_lock, flags);
-       for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
-               if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
+       for (i = 0; i < dev->max_tx_urbs; i++) {
+               if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
                        context = &priv->tx_contexts[i];
 
                        context->echo_index = i;
                        can_put_echo_skb(skb, netdev, context->echo_index);
                        ++priv->active_tx_contexts;
-                       if (priv->active_tx_contexts >= MAX_TX_URBS)
+                       if (priv->active_tx_contexts >= dev->max_tx_urbs)
                                netif_stop_queue(netdev);
 
                        break;
@@ -1743,7 +1754,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
                can_free_echo_skb(netdev, context->echo_index);
-               context->echo_index = MAX_TX_URBS;
+               context->echo_index = dev->max_tx_urbs;
                --priv->active_tx_contexts;
                netif_wake_queue(netdev);
 
@@ -1881,7 +1892,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
        if (err)
                return err;
 
-       netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+       netdev = alloc_candev(sizeof(*priv) +
+                             dev->max_tx_urbs * sizeof(*priv->tx_contexts),
+                             dev->max_tx_urbs);
        if (!netdev) {
                dev_err(&intf->dev, "Cannot alloc candev\n");
                return -ENOMEM;
@@ -2009,6 +2022,13 @@ static int kvaser_usb_probe(struct usb_interface *intf,
                return err;
        }
 
+       dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
+               ((dev->fw_version >> 24) & 0xff),
+               ((dev->fw_version >> 16) & 0xff),
+               (dev->fw_version & 0xffff));
+
+       dev_dbg(&intf->dev, "Max oustanding tx = %d URBs\n", dev->max_tx_urbs);
+
        err = kvaser_usb_get_card_info(dev);
        if (err) {
                dev_err(&intf->dev,
@@ -2016,11 +2036,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
                return err;
        }
 
-       dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
-               ((dev->fw_version >> 24) & 0xff),
-               ((dev->fw_version >> 16) & 0xff),
-               (dev->fw_version & 0xffff));
-
        for (i = 0; i < dev->nchannels; i++) {
                err = kvaser_usb_init_one(intf, id, i);
                if (err) {
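
The kvaser_usb hunks above replace the fixed MAX_TX_URBS array with a flexible-array member sized from the firmware-reported max_tx_urbs, and reuse that same value as the "slot is free" sentinel for echo_index. The following is a minimal, self-contained userspace sketch of that claim/release scheme (plain calloc instead of alloc_candev, made-up type and function names, not driver code):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the driver structures, not the kernel types. */
struct tx_ctx { unsigned int echo_index; };

struct chan {
	unsigned int max_tx;		/* firmware-reported limit */
	unsigned int active;
	struct tx_ctx ctx[];		/* flexible array, sized at allocation */
};

static struct chan *chan_alloc(unsigned int max_tx)
{
	/* One allocation covers the header plus max_tx contexts, mirroring
	 * alloc_candev(sizeof(*priv) + max_tx * sizeof(*priv->tx_contexts), ...). */
	struct chan *c = calloc(1, sizeof(*c) + max_tx * sizeof(struct tx_ctx));
	unsigned int i;

	if (!c)
		return NULL;
	c->max_tx = max_tx;
	for (i = 0; i < max_tx; i++)
		c->ctx[i].echo_index = max_tx;	/* max_tx means "slot is free" */
	return c;
}

static int chan_claim(struct chan *c)
{
	unsigned int i;

	for (i = 0; i < c->max_tx; i++)
		if (c->ctx[i].echo_index == c->max_tx) {
			c->ctx[i].echo_index = i;		/* slot now owns URB i */
			return (int)(++c->active >= c->max_tx);	/* 1: stop the queue */
		}
	return -1;					/* no free context */
}

static void chan_release(struct chan *c, unsigned int i)
{
	c->ctx[i].echo_index = c->max_tx;		/* back to the sentinel */
	c->active--;
}

int main(void)
{
	struct chan *c = chan_alloc(4);
	int i;

	for (i = 0; i < 5; i++)
		printf("claim %d -> %d (active %u)\n", i, chan_claim(c), c->active);
	chan_release(c, 0);
	printf("after release: claim -> %d (active %u)\n", chan_claim(c), c->active);
	free(c);
	return 0;
}
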
index 1ba7c25002e1e27ec1a9333412c175edb0f5594a..e8fc4952c6b074f80a2af132d2bc161e7fd3d8c7 100644 (file)
@@ -26,8 +26,8 @@
 #define PUCAN_CMD_FILTER_STD           0x008
 #define PUCAN_CMD_TX_ABORT             0x009
 #define PUCAN_CMD_WR_ERR_CNT           0x00a
-#define PUCAN_CMD_RX_FRAME_ENABLE      0x00b
-#define PUCAN_CMD_RX_FRAME_DISABLE     0x00c
+#define PUCAN_CMD_SET_EN_OPTION                0x00b
+#define PUCAN_CMD_CLR_DIS_OPTION       0x00c
 #define PUCAN_CMD_END_OF_COLLECTION    0x3ff
 
 /* uCAN received messages list */
@@ -101,14 +101,15 @@ struct __packed pucan_wr_err_cnt {
        u16     unused;
 };
 
-/* uCAN RX_FRAME_ENABLE command fields */
-#define PUCAN_FLTEXT_ERROR             0x0001
-#define PUCAN_FLTEXT_BUSLOAD           0x0002
+/* uCAN SET_EN/CLR_DIS _OPTION command fields */
+#define PUCAN_OPTION_ERROR             0x0001
+#define PUCAN_OPTION_BUSLOAD           0x0002
+#define PUCAN_OPTION_CANDFDISO         0x0004
 
-struct __packed pucan_filter_ext {
+struct __packed pucan_options {
        __le16  opcode_channel;
 
-       __le16  ext_mask;
+       __le16  options;
        u32     unused;
 };
 
index 0bac0f14edc3cd73727b4a0d9f0776d857c4ba8c..a9221ad9f1a0a0387de1ebf8470c0d360e895773 100644 (file)
@@ -110,13 +110,13 @@ struct __packed pcan_ufd_led {
        u8      unused[5];
 };
 
-/* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */
+/* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */
 #define PCAN_UFD_FLTEXT_CALIBRATION    0x8000
 
-struct __packed pcan_ufd_filter_ext {
+struct __packed pcan_ufd_options {
        __le16  opcode_channel;
 
-       __le16  ext_mask;
+       __le16  ucan_mask;
        u16     unused;
        __le16  usb_mask;
 };
@@ -251,6 +251,27 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
        /* moves the pointer forward */
        pc += sizeof(struct pucan_wr_err_cnt);
 
+       /* add command to switch from ISO to non-ISO mode, if fw allows it */
+       if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) {
+               struct pucan_options *puo = (struct pucan_options *)pc;
+
+               puo->opcode_channel =
+                       (dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ?
+                       pucan_cmd_opcode_channel(dev,
+                                                PUCAN_CMD_CLR_DIS_OPTION) :
+                       pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION);
+
+               puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO);
+
+               /* to be sure that no other extended bits will be taken into
+                * account
+                */
+               puo->unused = 0;
+
+               /* moves the pointer forward */
+               pc += sizeof(struct pucan_options);
+       }
+
        /* next, go back to operational mode */
        cmd = (struct pucan_command *)pc;
        cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
@@ -321,21 +342,21 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
        return pcan_usb_fd_send_cmd(dev, cmd);
 }
 
-/* set/unset notifications filter:
+/* set/unset options
  *
- *     onoff   sets(1)/unset(0) notifications
- *     mask    each bit defines a kind of notification to set/unset
+ *     onoff   set(1)/unset(0) options
+ *     mask    each bit defines a kind of options to set/unset
  */
-static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev,
-                                     bool onoff, u16 ext_mask, u16 usb_mask)
+static int pcan_usb_fd_set_options(struct peak_usb_device *dev,
+                                  bool onoff, u16 ucan_mask, u16 usb_mask)
 {
-       struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev);
+       struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev);
 
        cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
-                                       (onoff) ? PUCAN_CMD_RX_FRAME_ENABLE :
-                                                 PUCAN_CMD_RX_FRAME_DISABLE);
+                                       (onoff) ? PUCAN_CMD_SET_EN_OPTION :
+                                                 PUCAN_CMD_CLR_DIS_OPTION);
 
-       cmd->ext_mask = cpu_to_le16(ext_mask);
+       cmd->ucan_mask = cpu_to_le16(ucan_mask);
        cmd->usb_mask = cpu_to_le16(usb_mask);
 
        /* send the command */
@@ -770,9 +791,9 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev)
                                       &pcan_usb_pro_fd);
 
                /* enable USB calibration messages */
-               err = pcan_usb_fd_set_filter_ext(dev, 1,
-                                                PUCAN_FLTEXT_ERROR,
-                                                PCAN_UFD_FLTEXT_CALIBRATION);
+               err = pcan_usb_fd_set_options(dev, 1,
+                                             PUCAN_OPTION_ERROR,
+                                             PCAN_UFD_FLTEXT_CALIBRATION);
        }
 
        pdev->usb_if->dev_opened_count++;
@@ -806,9 +827,9 @@ static int pcan_usb_fd_stop(struct peak_usb_device *dev)
 
        /* turn off special msgs for that interface if no other dev opened */
        if (pdev->usb_if->dev_opened_count == 1)
-               pcan_usb_fd_set_filter_ext(dev, 0,
-                                          PUCAN_FLTEXT_ERROR,
-                                          PCAN_UFD_FLTEXT_CALIBRATION);
+               pcan_usb_fd_set_options(dev, 0,
+                                       PUCAN_OPTION_ERROR,
+                                       PCAN_UFD_FLTEXT_CALIBRATION);
        pdev->usb_if->dev_opened_count--;
 
        return 0;
@@ -860,8 +881,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
                         pdev->usb_if->fw_info.fw_version[2],
                         dev->adapter->ctrl_count);
 
-               /* the currently supported hw is non-ISO */
-               dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+               /* check for ability to switch between ISO/non-ISO modes */
+               if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
+                       /* firmware >= 2.x supports ISO/non-ISO switching */
+                       dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
+               } else {
+                       /* firmware < 2.x only supports fixed(!) non-ISO */
+                       dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
+               }
 
                /* tell the hardware the can driver is running */
                err = pcan_usb_fd_drv_loaded(dev, 1);
@@ -937,9 +964,9 @@ static void pcan_usb_fd_exit(struct peak_usb_device *dev)
        if (dev->ctrl_idx == 0) {
                /* turn off calibration message if any device were opened */
                if (pdev->usb_if->dev_opened_count > 0)
-                       pcan_usb_fd_set_filter_ext(dev, 0,
-                                                  PUCAN_FLTEXT_ERROR,
-                                                  PCAN_UFD_FLTEXT_CALIBRATION);
+                       pcan_usb_fd_set_options(dev, 0,
+                                               PUCAN_OPTION_ERROR,
+                                               PCAN_UFD_FLTEXT_CALIBRATION);
 
                /* tell USB adapter that the driver is being unloaded */
                pcan_usb_fd_drv_loaded(dev, 0);
index 11d6e6561df159c3dc9dff28fc504e945a77f47b..15a8190a6f75f61908f5ba968d37451e2f982897 100644 (file)
@@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 {
        struct pcnet32_private *lp;
        int i, media;
-       int fdx, mii, fset, dxsuflo;
+       int fdx, mii, fset, dxsuflo, sram;
        int chip_version;
        char *chipname;
        struct net_device *dev;
@@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
        }
 
        /* initialize variables */
-       fdx = mii = fset = dxsuflo = 0;
+       fdx = mii = fset = dxsuflo = sram = 0;
        chip_version = (chip_version >> 12) & 0xffff;
 
        switch (chip_version) {
@@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
                chipname = "PCnet/FAST III 79C973";     /* PCI */
                fdx = 1;
                mii = 1;
+               sram = 1;
                break;
        case 0x2626:
                chipname = "PCnet/Home 79C978"; /* PCI */
@@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
                chipname = "PCnet/FAST III 79C975";     /* PCI */
                fdx = 1;
                mii = 1;
+               sram = 1;
                break;
        case 0x2628:
                chipname = "PCnet/PRO 79C976";
@@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
                dxsuflo = 1;
        }
 
+       /*
+        * The Am79C973/Am79C975 controllers come with 12K of SRAM
+        * which we can use for the Tx/Rx buffers but most importantly,
+        * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid
+        * Tx fifo underflows.
+        */
+       if (sram) {
+               /*
+                * The SRAM is being configured in two steps. First we
+                * set the SRAM size in the BCR25:SRAM_SIZE bits. According
+                * to the datasheet, each bit corresponds to a 512-byte
+                * page so we can have at most 24 pages. The SRAM_SIZE
+                * holds the value of the upper 8 bits of the 16-bit SRAM size.
+                * The low 8-bits start at 0x00 and end at 0xff. So the
+                * address range is from 0x0000 up to 0x17ff. Therefore,
+                * the SRAM_SIZE is set to 0x17. The next step is to set
+                * the BCR26:SRAM_BND midway through so the Tx and Rx
+                * buffers can share the SRAM equally.
+                */
+               a->write_bcr(ioaddr, 25, 0x17);
+               a->write_bcr(ioaddr, 26, 0xc);
+               /* And finally enable the NOUFLO bit */
+               a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
+       }
+
        dev = alloc_etherdev(sizeof(*lp));
        if (!dev) {
                ret = -ENOMEM;
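
As a quick sanity check of the SRAM comment above (illustrative arithmetic only, not driver code; the register interpretation follows the comment itself, with BCR25/BCR26 taken as the upper 8 bits of the size and boundary, and NOUFLO as bit 11 of BCR18):

#include <stdio.h>

int main(void)
{
	unsigned int bcr25 = 0x17;	/* SRAM_SIZE: upper 8 bits of the size */
	unsigned int bcr26 = 0x0c;	/* SRAM_BND: upper 8 bits of the boundary */
	unsigned int bcr18 = 0x0000;	/* pretend current BCR18 contents */

	/* The low byte runs 0x00..0xff, so the last SRAM address is
	 * (BCR25 << 8) | 0xff and the Tx/Rx boundary sits at BCR26 << 8. */
	unsigned int top = (bcr25 << 8) | 0xff;	/* 0x17ff */
	unsigned int bnd = bcr26 << 8;		/* 0x0c00 */

	printf("SRAM range: 0x0000..0x%04x (%u locations)\n", top, top + 1);
	printf("Tx/Rx boundary: 0x%04x (midway: %s)\n", bnd,
	       bnd == (top + 1) / 2 ? "yes" : "no");

	/* NOUFLO is enabled with a read-modify-write of BCR18, bit 11 */
	bcr18 |= 1 << 11;
	printf("BCR18 with NOUFLO set: 0x%04x\n", bcr18);
	return 0;
}
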
index 756053c028becff93c62cfcdbd6e1f635594cc01..4085c4b310470b6ebf718121fa423d2995dcb8ef 100644 (file)
@@ -1811,7 +1811,7 @@ struct bnx2x {
        int                     stats_state;
 
        /* used for synchronization of concurrent threads statistics handling */
-       spinlock_t              stats_lock;
+       struct mutex            stats_lock;
 
        /* used by dmae command loader */
        struct dmae_command     stats_dmae;
@@ -1935,8 +1935,6 @@ struct bnx2x {
 
        int fp_array_size;
        u32 dump_preset_idx;
-       bool                                    stats_started;
-       struct semaphore                        stats_sema;
 
        u8                                      phys_port_id[ETH_ALEN];
 
index 996e215fc3246c4fba6f1714ddceb69b88f179be..1ec635f549944f87e0c0df4eb0816dcf33d90706 100644 (file)
@@ -129,8 +129,8 @@ struct bnx2x_mac_vals {
        u32 xmac_val;
        u32 emac_addr;
        u32 emac_val;
-       u32 umac_addr;
-       u32 umac_val;
+       u32 umac_addr[2];
+       u32 umac_val[2];
        u32 bmac_addr;
        u32 bmac_val[2];
 };
@@ -7866,6 +7866,20 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
        return 0;
 }
 
+/* A previous driver DMAE transaction may have occurred when the pre-boot stage
+ * ended and boot began, or when the kdump kernel was loaded. Either case would
+ * invalidate the addresses of the transaction, resulting in the was-error bit
+ * being set in the PCI, which causes all hw-to-host PCIe transactions to time
+ * out. If this happened, we want to clear the interrupt which detected this
+ * from the pglueb, as well as the was-done bit.
+ */
+static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
+{
+       if (!CHIP_IS_E1x(bp))
+               REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+                      1 << BP_ABS_FUNC(bp));
+}
+
 static int bnx2x_init_hw_func(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
@@ -7958,8 +7972,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 
        bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
 
-       if (!CHIP_IS_E1x(bp))
-               REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
+       bnx2x_clean_pglue_errors(bp);
 
        bnx2x_init_block(bp, BLOCK_ATC, init_phase);
        bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
@@ -10141,6 +10154,25 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
        return base + (BP_ABS_FUNC(bp)) * stride;
 }
 
+static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
+                                        u8 port, u32 reset_reg,
+                                        struct bnx2x_mac_vals *vals)
+{
+       u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
+       u32 base_addr;
+
+       if (!(mask & reset_reg))
+               return false;
+
+       BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
+       base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+       vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
+       vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
+       REG_WR(bp, vals->umac_addr[port], 0);
+
+       return true;
+}
+
 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
                                        struct bnx2x_mac_vals *vals)
 {
@@ -10149,10 +10181,7 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
        u8 port = BP_PORT(bp);
 
        /* reset addresses as they also mark which values were changed */
-       vals->bmac_addr = 0;
-       vals->umac_addr = 0;
-       vals->xmac_addr = 0;
-       vals->emac_addr = 0;
+       memset(vals, 0, sizeof(*vals));
 
        reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
 
@@ -10201,15 +10230,11 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
                        REG_WR(bp, vals->xmac_addr, 0);
                        mac_stopped = true;
                }
-               mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
-               if (mask & reset_reg) {
-                       BNX2X_DEV_INFO("Disable umac Rx\n");
-                       base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
-                       vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
-                       vals->umac_val = REG_RD(bp, vals->umac_addr);
-                       REG_WR(bp, vals->umac_addr, 0);
-                       mac_stopped = true;
-               }
+
+               mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
+                                                           reset_reg, vals);
+               mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
+                                                           reset_reg, vals);
        }
 
        if (mac_stopped)
@@ -10505,8 +10530,11 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                /* Close the MAC Rx to prevent BRB from filling up */
                bnx2x_prev_unload_close_mac(bp, &mac_vals);
 
-               /* close LLH filters towards the BRB */
+               /* close LLH filters for both ports towards the BRB */
                bnx2x_set_rx_filter(&bp->link_params, 0);
+               bp->link_params.port ^= 1;
+               bnx2x_set_rx_filter(&bp->link_params, 0);
+               bp->link_params.port ^= 1;
 
                /* Check if the UNDI driver was previously loaded */
                if (bnx2x_prev_is_after_undi(bp)) {
@@ -10553,8 +10581,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 
        if (mac_vals.xmac_addr)
                REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
-       if (mac_vals.umac_addr)
-               REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+       if (mac_vals.umac_addr[0])
+               REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
+       if (mac_vals.umac_addr[1])
+               REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
        if (mac_vals.emac_addr)
                REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
        if (mac_vals.bmac_addr) {
@@ -10571,26 +10601,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
        return bnx2x_prev_mcp_done(bp);
 }
 
-/* previous driver DMAE transaction may have occurred when pre-boot stage ended
- * and boot began, or when kdump kernel was loaded. Either case would invalidate
- * the addresses of the transaction, resulting in was-error bit set in the pci
- * causing all hw-to-host pcie transactions to timeout. If this happened we want
- * to clear the interrupt which detected this from the pglueb and the was done
- * bit
- */
-static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
-{
-       if (!CHIP_IS_E1x(bp)) {
-               u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
-               if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
-                       DP(BNX2X_MSG_SP,
-                          "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
-                       REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
-                              1 << BP_FUNC(bp));
-               }
-       }
-}
-
 static int bnx2x_prev_unload(struct bnx2x *bp)
 {
        int time_counter = 10;
@@ -10600,7 +10610,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
        /* clear hw from errors which may have resulted from an interrupted
         * dmae transaction.
         */
-       bnx2x_prev_interrupted_dmae(bp);
+       bnx2x_clean_pglue_errors(bp);
 
        /* Release previously held locks */
        hw_lock_reg = (BP_FUNC(bp) <= 5) ?
@@ -12037,9 +12047,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
        mutex_init(&bp->port.phy_mutex);
        mutex_init(&bp->fw_mb_mutex);
        mutex_init(&bp->drv_info_mutex);
+       mutex_init(&bp->stats_lock);
        bp->drv_info_mng_owner = false;
-       spin_lock_init(&bp->stats_lock);
-       sema_init(&bp->stats_sema, 1);
 
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
        INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -13668,9 +13677,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
        cancel_delayed_work_sync(&bp->sp_task);
        cancel_delayed_work_sync(&bp->period_task);
 
-       spin_lock_bh(&bp->stats_lock);
+       mutex_lock(&bp->stats_lock);
        bp->stats_state = STATS_STATE_DISABLED;
-       spin_unlock_bh(&bp->stats_lock);
+       mutex_unlock(&bp->stats_lock);
 
        bnx2x_save_statistics(bp);
 
index e5aca2de1871350f3e47a788c4360b461c02a039..cfe3c7695455e16eab516e0f99db6ee6b7e58879 100644 (file)
@@ -2238,7 +2238,9 @@ int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
                cookie.vf = vf;
                cookie.state = VF_ACQUIRED;
-               bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+               rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+               if (rc)
+                       goto op_err;
        }
 
        DP(BNX2X_MSG_IOV, "set state to acquired\n");
index d1608297c773746237a8faf3697277afcbe45918..800ab44a07cecd721c8b12f0c5b8602e1124a0dd 100644 (file)
@@ -123,36 +123,28 @@ static void bnx2x_dp_stats(struct bnx2x *bp)
  */
 static void bnx2x_storm_stats_post(struct bnx2x *bp)
 {
-       if (!bp->stats_pending) {
-               int rc;
+       int rc;
 
-               spin_lock_bh(&bp->stats_lock);
-
-               if (bp->stats_pending) {
-                       spin_unlock_bh(&bp->stats_lock);
-                       return;
-               }
-
-               bp->fw_stats_req->hdr.drv_stats_counter =
-                       cpu_to_le16(bp->stats_counter++);
+       if (bp->stats_pending)
+               return;
 
-               DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
-                  le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
+       bp->fw_stats_req->hdr.drv_stats_counter =
+               cpu_to_le16(bp->stats_counter++);
 
-               /* adjust the ramrod to include VF queues statistics */
-               bnx2x_iov_adjust_stats_req(bp);
-               bnx2x_dp_stats(bp);
+       DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
+          le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
 
-               /* send FW stats ramrod */
-               rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
-                                  U64_HI(bp->fw_stats_req_mapping),
-                                  U64_LO(bp->fw_stats_req_mapping),
-                                  NONE_CONNECTION_TYPE);
-               if (rc == 0)
-                       bp->stats_pending = 1;
+       /* adjust the ramrod to include VF queues statistics */
+       bnx2x_iov_adjust_stats_req(bp);
+       bnx2x_dp_stats(bp);
 
-               spin_unlock_bh(&bp->stats_lock);
-       }
+       /* send FW stats ramrod */
+       rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
+                          U64_HI(bp->fw_stats_req_mapping),
+                          U64_LO(bp->fw_stats_req_mapping),
+                          NONE_CONNECTION_TYPE);
+       if (rc == 0)
+               bp->stats_pending = 1;
 }
 
 static void bnx2x_hw_stats_post(struct bnx2x *bp)
@@ -221,7 +213,7 @@ static void bnx2x_stats_comp(struct bnx2x *bp)
  */
 
 /* should be called under stats_sema */
-static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
 {
        struct dmae_command *dmae;
        u32 opcode;
@@ -519,7 +511,7 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
 }
 
 /* should be called under stats_sema */
-static void __bnx2x_stats_start(struct bnx2x *bp)
+static void bnx2x_stats_start(struct bnx2x *bp)
 {
        if (IS_PF(bp)) {
                if (bp->port.pmf)
@@ -531,34 +523,13 @@ static void __bnx2x_stats_start(struct bnx2x *bp)
                bnx2x_hw_stats_post(bp);
                bnx2x_storm_stats_post(bp);
        }
-
-       bp->stats_started = true;
-}
-
-static void bnx2x_stats_start(struct bnx2x *bp)
-{
-       if (down_timeout(&bp->stats_sema, HZ/10))
-               BNX2X_ERR("Unable to acquire stats lock\n");
-       __bnx2x_stats_start(bp);
-       up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
 {
-       if (down_timeout(&bp->stats_sema, HZ/10))
-               BNX2X_ERR("Unable to acquire stats lock\n");
        bnx2x_stats_comp(bp);
-       __bnx2x_stats_pmf_update(bp);
-       __bnx2x_stats_start(bp);
-       up(&bp->stats_sema);
-}
-
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
-{
-       if (down_timeout(&bp->stats_sema, HZ/10))
-               BNX2X_ERR("Unable to acquire stats lock\n");
-       __bnx2x_stats_pmf_update(bp);
-       up(&bp->stats_sema);
+       bnx2x_stats_pmf_update(bp);
+       bnx2x_stats_start(bp);
 }
 
 static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -568,11 +539,9 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
         */
        if (IS_VF(bp))
                return;
-       if (down_timeout(&bp->stats_sema, HZ/10))
-               BNX2X_ERR("Unable to acquire stats lock\n");
+
        bnx2x_stats_comp(bp);
-       __bnx2x_stats_start(bp);
-       up(&bp->stats_sema);
+       bnx2x_stats_start(bp);
 }
 
 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -1246,18 +1215,12 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 {
        u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 
-       /* we run update from timer context, so give up
-        * if somebody is in the middle of transition
-        */
-       if (down_trylock(&bp->stats_sema))
+       if (bnx2x_edebug_stats_stopped(bp))
                return;
 
-       if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
-               goto out;
-
        if (IS_PF(bp)) {
                if (*stats_comp != DMAE_COMP_VAL)
-                       goto out;
+                       return;
 
                if (bp->port.pmf)
                        bnx2x_hw_stats_update(bp);
@@ -1267,7 +1230,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
                                BNX2X_ERR("storm stats were not updated for 3 times\n");
                                bnx2x_panic();
                        }
-                       goto out;
+                       return;
                }
        } else {
                /* vf doesn't collect HW statistics, and doesn't get completions
@@ -1281,7 +1244,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
        /* vf is done */
        if (IS_VF(bp))
-               goto out;
+               return;
 
        if (netif_msg_timer(bp)) {
                struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1292,9 +1255,6 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
        bnx2x_hw_stats_post(bp);
        bnx2x_storm_stats_post(bp);
-
-out:
-       up(&bp->stats_sema);
 }
 
 static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1358,12 +1318,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
 
 static void bnx2x_stats_stop(struct bnx2x *bp)
 {
-       int update = 0;
-
-       if (down_timeout(&bp->stats_sema, HZ/10))
-               BNX2X_ERR("Unable to acquire stats lock\n");
-
-       bp->stats_started = false;
+       bool update = false;
 
        bnx2x_stats_comp(bp);
 
@@ -1381,8 +1336,6 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
                bnx2x_hw_stats_post(bp);
                bnx2x_stats_comp(bp);
        }
-
-       up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1410,18 +1363,28 @@ static const struct {
 
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 {
-       enum bnx2x_stats_state state;
-       void (*action)(struct bnx2x *bp);
+       enum bnx2x_stats_state state = bp->stats_state;
+
        if (unlikely(bp->panic))
                return;
 
-       spin_lock_bh(&bp->stats_lock);
-       state = bp->stats_state;
+       /* Statistics updates run from timer context, and we don't want to stop
+        * that context in case someone is in the middle of a transition.
+        * For other events, wait a bit until the lock is taken.
+        */
+       if (!mutex_trylock(&bp->stats_lock)) {
+               if (event == STATS_EVENT_UPDATE)
+                       return;
+
+               DP(BNX2X_MSG_STATS,
+                  "Unlikely stats' lock contention [event %d]\n", event);
+               mutex_lock(&bp->stats_lock);
+       }
+
+       bnx2x_stats_stm[state][event].action(bp);
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
-       action = bnx2x_stats_stm[state][event].action;
-       spin_unlock_bh(&bp->stats_lock);
 
-       action(bp);
+       mutex_unlock(&bp->stats_lock);
 
        if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1998,13 +1961,34 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
        }
 }
 
-void bnx2x_stats_safe_exec(struct bnx2x *bp,
-                          void (func_to_exec)(void *cookie),
-                          void *cookie){
-       if (down_timeout(&bp->stats_sema, HZ/10))
-               BNX2X_ERR("Unable to acquire stats lock\n");
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+                         void (func_to_exec)(void *cookie),
+                         void *cookie)
+{
+       int cnt = 10, rc = 0;
+
+       /* Wait for statistics to end [while blocking further requests],
+        * then run supplied function 'safely'.
+        */
+       mutex_lock(&bp->stats_lock);
+
        bnx2x_stats_comp(bp);
+       while (bp->stats_pending && cnt--)
+               if (bnx2x_storm_stats_update(bp))
+                       usleep_range(1000, 2000);
+       if (bp->stats_pending) {
+               BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
+               rc = -EBUSY;
+               goto out;
+       }
+
        func_to_exec(cookie);
-       __bnx2x_stats_start(bp);
-       up(&bp->stats_sema);
+
+out:
+       /* No need to restart statistics - if they're enabled, the timer
+        * will restart the statistics.
+        */
+       mutex_unlock(&bp->stats_lock);
+
+       return rc;
 }
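
The bnx2x statistics rework above replaces the spinlock/semaphore pair with a single mutex, and bnx2x_stats_handle() uses trylock so that the timer-driven UPDATE event simply skips a cycle when the lock is contended, while the rarer events block until they get it. A rough userspace analogue of that locking policy, using pthreads and placeholder event names (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

enum stats_event { EVENT_UPDATE, EVENT_STOP };

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

static void stats_handle(enum stats_event event)
{
	/* Timer-context updates must not sleep waiting for the lock: if it is
	 * contended, drop this update and try again on the next tick.
	 * All other (infrequent) events are allowed to block. */
	if (pthread_mutex_trylock(&stats_lock) != 0) {
		if (event == EVENT_UPDATE)
			return;
		pthread_mutex_lock(&stats_lock);
	}

	printf("handling event %d under stats_lock\n", event);

	pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
	stats_handle(EVENT_UPDATE);	/* periodic, may be skipped under contention */
	stats_handle(EVENT_STOP);	/* must run, waits for the lock if needed */
	return 0;
}
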
index 2beceaefdeea7aa5ac53f6a3028cd0fbcacbc51a..965539a9dabe7e4702e1ba6b382aefc69fb26fac 100644 (file)
@@ -539,9 +539,9 @@ struct bnx2x;
 void bnx2x_memset_stats(struct bnx2x *bp);
 void bnx2x_stats_init(struct bnx2x *bp);
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
-void bnx2x_stats_safe_exec(struct bnx2x *bp,
-                          void (func_to_exec)(void *cookie),
-                          void *cookie);
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+                         void (func_to_exec)(void *cookie),
+                         void *cookie);
 
 /**
  * bnx2x_save_statistics - save statistics when unloading.
index 97842d03675b327d65f564b351f24d12bf72c18b..c6ff4890d171a1509c7b3ca2aa21e5b0edd6039f 100644 (file)
@@ -376,8 +376,6 @@ enum {
 enum {
        INGQ_EXTRAS = 2,        /* firmware event queue and */
                                /*   forwarded interrupts */
-       MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
-                  + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
        MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
                   + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
 };
@@ -616,11 +614,13 @@ struct sge {
        unsigned int idma_qid[2];   /* SGE IDMA Hung Ingress Queue ID */
 
        unsigned int egr_start;
+       unsigned int egr_sz;
        unsigned int ingr_start;
-       void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
-       struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
-       DECLARE_BITMAP(starving_fl, MAX_EGRQ);
-       DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
+       unsigned int ingr_sz;
+       void **egr_map;    /* qid->queue egress queue map */
+       struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
+       unsigned long *starving_fl;
+       unsigned long *txq_maperr;
        struct timer_list rx_timer; /* refills starving FLs */
        struct timer_list tx_timer; /* checks Tx queues */
 };
@@ -1136,6 +1136,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
 
 unsigned int qtimer_val(const struct adapter *adap,
                        const struct sge_rspq *q);
+
+int t4_init_devlog_params(struct adapter *adapter);
 int t4_init_sge_params(struct adapter *adapter);
 int t4_init_tp_params(struct adapter *adap);
 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
index 78854ceb0870a29004ab04261fa26b296b958d4e..dcb0479452907abd43732aa9332f927f7f0c3ef3 100644 (file)
@@ -670,9 +670,13 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
                "0.9375" };
 
        int i;
-       u16 incr[NMTUS][NCCTRL_WIN];
+       u16 (*incr)[NCCTRL_WIN];
        struct adapter *adap = seq->private;
 
+       incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL);
+       if (!incr)
+               return -ENOMEM;
+
        t4_read_cong_tbl(adap, incr);
 
        for (i = 0; i < NCCTRL_WIN; ++i) {
@@ -685,6 +689,8 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
                           adap->params.a_wnd[i],
                           dec_fac[adap->params.b_wnd[i]]);
        }
+
+       kfree(incr);
        return 0;
 }
 
index a22cf932ca3536920798ef50241fd9b43c26857a..d92995138f7ef9253c3b2ac37efba2c491e2f528 100644 (file)
@@ -920,7 +920,7 @@ static void quiesce_rx(struct adapter *adap)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+       for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];
 
                if (q && q->handler) {
@@ -934,6 +934,21 @@ static void quiesce_rx(struct adapter *adap)
        }
 }
 
+/* Disable interrupt and napi handler */
+static void disable_interrupts(struct adapter *adap)
+{
+       if (adap->flags & FULL_INIT_DONE) {
+               t4_intr_disable(adap);
+               if (adap->flags & USING_MSIX) {
+                       free_msix_queue_irqs(adap);
+                       free_irq(adap->msix_info[0].vec, adap);
+               } else {
+                       free_irq(adap->pdev->irq, adap);
+               }
+               quiesce_rx(adap);
+       }
+}
+
 /*
  * Enable NAPI scheduling and interrupt generation for all Rx queues.
  */
@@ -941,7 +956,7 @@ static void enable_rx(struct adapter *adap)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+       for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];
 
                if (!q)
@@ -970,8 +985,8 @@ static int setup_sge_queues(struct adapter *adap)
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;
 
-       bitmap_zero(s->starving_fl, MAX_EGRQ);
-       bitmap_zero(s->txq_maperr, MAX_EGRQ);
+       bitmap_zero(s->starving_fl, s->egr_sz);
+       bitmap_zero(s->txq_maperr, s->egr_sz);
 
        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
@@ -983,6 +998,19 @@ static int setup_sge_queues(struct adapter *adap)
                msi_idx = -((int)s->intrq.abs_id + 1);
        }
 
+       /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
+        * don't forget to update the following which need to be
+        * synchronized to any changes here.
+        *
+        * 1. The calculations of MAX_INGQ in cxgb4.h.
+        *
+        * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
+        *    to accommodate any new/deleted Ingress Queues
+        *    which need MSI-X Vectors.
+        *
+        * 3. Update sge_qinfo_show() to include information on the
+        *    new/deleted queues.
+        */
        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
@@ -4244,19 +4272,12 @@ static int cxgb_up(struct adapter *adap)
 
 static void cxgb_down(struct adapter *adapter)
 {
-       t4_intr_disable(adapter);
        cancel_work_sync(&adapter->tid_release_task);
        cancel_work_sync(&adapter->db_full_task);
        cancel_work_sync(&adapter->db_drop_task);
        adapter->tid_release_task_busy = false;
        adapter->tid_release_head = NULL;
 
-       if (adapter->flags & USING_MSIX) {
-               free_msix_queue_irqs(adapter);
-               free_irq(adapter->msix_info[0].vec, adapter);
-       } else
-               free_irq(adapter->pdev->irq, adapter);
-       quiesce_rx(adapter);
        t4_sge_stop(adapter);
        t4_free_sge_resources(adapter);
        adapter->flags &= ~FULL_INIT_DONE;
@@ -4733,8 +4754,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
        if (ret < 0)
                return ret;
 
-       ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
-                         0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+       ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
+                         MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
+                         FW_CMD_CAP_PF);
        if (ret < 0)
                return ret;
 
@@ -5088,10 +5110,15 @@ static int adap_init0(struct adapter *adap)
        enum dev_state state;
        u32 params[7], val[7];
        struct fw_caps_config_cmd caps_cmd;
-       struct fw_devlog_cmd devlog_cmd;
-       u32 devlog_meminfo;
        int reset = 1;
 
+       /* Grab Firmware Device Log parameters as early as possible so we have
+        * access to it for debugging, etc.
+        */
+       ret = t4_init_devlog_params(adap);
+       if (ret < 0)
+               return ret;
+
        /* Contact FW, advertising Master capability */
        ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
        if (ret < 0) {
@@ -5169,30 +5196,6 @@ static int adap_init0(struct adapter *adap)
        if (ret < 0)
                goto bye;
 
-       /* Read firmware device log parameters.  We really need to find a way
-        * to get these parameters initialized with some default values (which
-        * are likely to be correct) for the case where we either don't
-        * attache to the firmware or it's crashed when we probe the adapter.
-        * That way we'll still be able to perform early firmware startup
-        * debugging ...  If the request to get the Firmware's Device Log
-        * parameters fails, we'll live so we don't make that a fatal error.
-        */
-       memset(&devlog_cmd, 0, sizeof(devlog_cmd));
-       devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
-                                      FW_CMD_REQUEST_F | FW_CMD_READ_F);
-       devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
-       ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
-                        &devlog_cmd);
-       if (ret == 0) {
-               devlog_meminfo =
-                       ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
-               adap->params.devlog.memtype =
-                       FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
-               adap->params.devlog.start =
-                       FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
-               adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
-       }
-
        /*
         * Find out what ports are available to us.  Note that we need to do
         * this before calling adap_init0_no_config() since it needs nports
@@ -5293,6 +5296,51 @@ static int adap_init0(struct adapter *adap)
        adap->tids.nftids = val[4] - val[3] + 1;
        adap->sge.ingr_start = val[5];
 
+       /* qids (ingress/egress) returned from firmware can be anywhere
+        * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
+        * Hence driver needs to allocate memory for this range to
+        * store the queue info. Get the highest IQFLINT/EQ index returned
+        * in FW_EQ_*_CMD.alloc command.
+        */
+       params[0] = FW_PARAM_PFVF(EQ_END);
+       params[1] = FW_PARAM_PFVF(IQFLINT_END);
+       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+       if (ret < 0)
+               goto bye;
+       adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
+       adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
+
+       adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
+                                   sizeof(*adap->sge.egr_map), GFP_KERNEL);
+       if (!adap->sge.egr_map) {
+               ret = -ENOMEM;
+               goto bye;
+       }
+
+       adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
+                                    sizeof(*adap->sge.ingr_map), GFP_KERNEL);
+       if (!adap->sge.ingr_map) {
+               ret = -ENOMEM;
+               goto bye;
+       }
+
+       /* Allocate the memory for the various egress queue bitmaps,
+        * i.e. starving_fl and txq_maperr.
+        */
+       adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+                                       sizeof(long), GFP_KERNEL);
+       if (!adap->sge.starving_fl) {
+               ret = -ENOMEM;
+               goto bye;
+       }
+
+       adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+                                      sizeof(long), GFP_KERNEL);
+       if (!adap->sge.txq_maperr) {
+               ret = -ENOMEM;
+               goto bye;
+       }
+
        params[0] = FW_PARAM_PFVF(CLIP_START);
        params[1] = FW_PARAM_PFVF(CLIP_END);
        ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
@@ -5501,6 +5549,10 @@ static int adap_init0(struct adapter *adap)
         * happened to HW/FW, stop issuing commands.
         */
 bye:
+       kfree(adap->sge.egr_map);
+       kfree(adap->sge.ingr_map);
+       kfree(adap->sge.starving_fl);
+       kfree(adap->sge.txq_maperr);
        if (ret != -ETIMEDOUT && ret != -EIO)
                t4_fw_bye(adap, adap->mbox);
        return ret;
@@ -5528,6 +5580,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
                netif_carrier_off(dev);
        }
        spin_unlock(&adap->stats_lock);
+       disable_interrupts(adap);
        if (adap->flags & FULL_INIT_DONE)
                cxgb_down(adap);
        rtnl_unlock();
@@ -5912,6 +5965,10 @@ static void free_some_resources(struct adapter *adapter)
 
        t4_free_mem(adapter->l2t);
        t4_free_mem(adapter->tids.tid_tab);
+       kfree(adapter->sge.egr_map);
+       kfree(adapter->sge.ingr_map);
+       kfree(adapter->sge.starving_fl);
+       kfree(adapter->sge.txq_maperr);
        disable_msi(adapter);
 
        for_each_port(adapter, i)
@@ -6237,6 +6294,8 @@ static void remove_one(struct pci_dev *pdev)
                if (is_offload(adapter))
                        detach_ulds(adapter);
 
+               disable_interrupts(adapter);
+
                for_each_port(adapter, i)
                        if (adapter->port[i]->reg_state == NETREG_REGISTERED)
                                unregister_netdev(adapter->port[i]);
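
adap_init0() above stops relying on the compile-time MAX_EGRQ and instead sizes the egress/ingress qid->queue maps and their bitmaps from the ranges the firmware reports. A small standalone sketch of that sizing, with calloc standing in for kcalloc and a hand-rolled BITS_TO_LONGS; the queue range values are invented:

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	/* pretend values returned by the FW_PARAM_PFVF(EQ_START/EQ_END) query */
	unsigned int egr_start = 64, egr_end = 211;
	unsigned int egr_sz = egr_end - egr_start + 1;

	void **egr_map;
	unsigned long *starving_fl;

	/* one map slot per possible egress qid, plus a bitmap of the same span */
	egr_map = calloc(egr_sz, sizeof(*egr_map));
	starving_fl = calloc(BITS_TO_LONGS(egr_sz), sizeof(long));
	if (!egr_map || !starving_fl)
		return 1;

	printf("egr_sz = %u queues, bitmap = %zu longs (%zu bytes)\n",
	       egr_sz, BITS_TO_LONGS(egr_sz),
	       BITS_TO_LONGS(egr_sz) * sizeof(long));

	/* mark bit 5 (queue id egr_start + 5) as starving, the way the
	 * driver indexes the bitmap by (qid - egr_start) */
	starving_fl[5 / BITS_PER_LONG] |= 1UL << (5 % BITS_PER_LONG);

	free(starving_fl);
	free(egr_map);
	return 0;
}
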
index b4b9f6048fe730dc1287a648a5e61c7f2d92dd7d..b688b32c21fe530aa0cd48b61323bca3ed269b53 100644 (file)
@@ -2171,7 +2171,7 @@ static void sge_rx_timer_cb(unsigned long data)
        struct adapter *adap = (struct adapter *)data;
        struct sge *s = &adap->sge;
 
-       for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
+       for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
                for (m = s->starving_fl[i]; m; m &= m - 1) {
                        struct sge_eth_rxq *rxq;
                        unsigned int id = __ffs(m) + i * BITS_PER_LONG;
@@ -2259,7 +2259,7 @@ static void sge_tx_timer_cb(unsigned long data)
        struct adapter *adap = (struct adapter *)data;
        struct sge *s = &adap->sge;
 
-       for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
+       for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
                for (m = s->txq_maperr[i]; m; m &= m - 1) {
                        unsigned long id = __ffs(m) + i * BITS_PER_LONG;
                        struct sge_ofld_txq *txq = s->egr_map[id];
@@ -2741,7 +2741,8 @@ void t4_free_sge_resources(struct adapter *adap)
                free_rspq_fl(adap, &adap->sge.intrq, NULL);
 
        /* clear the reverse egress queue map */
-       memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
+       memset(adap->sge.egr_map, 0,
+              adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
 }
 
 void t4_sge_start(struct adapter *adap)
index 1abdfa123c6cf8d42a4788400539b40b05ed84eb..ee394dc68303851153a3598fe4455cec972597d9 100644 (file)
@@ -4458,6 +4458,59 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
        return 0;
 }
 
+/**
+ *     t4_init_devlog_params - initialize adapter->params.devlog
+ *     @adap: the adapter
+ *
+ *     Initialize various fields of the adapter's Firmware Device Log
+ *     Parameters structure.
+ */
+int t4_init_devlog_params(struct adapter *adap)
+{
+       struct devlog_params *dparams = &adap->params.devlog;
+       u32 pf_dparams;
+       unsigned int devlog_meminfo;
+       struct fw_devlog_cmd devlog_cmd;
+       int ret;
+
+       /* If we're dealing with newer firmware, the Device Log Parameters
+        * are stored in a designated register which allows us to access the
+        * Device Log even if we can't talk to the firmware.
+        */
+       pf_dparams =
+               t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
+       if (pf_dparams) {
+               unsigned int nentries, nentries128;
+
+               dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
+               dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
+
+               nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
+               nentries = (nentries128 + 1) * 128;
+               dparams->size = nentries * sizeof(struct fw_devlog_e);
+
+               return 0;
+       }
+
+       /* Otherwise, ask the firmware for its Device Log Parameters.
+        */
+       memset(&devlog_cmd, 0, sizeof(devlog_cmd));
+       devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+                                      FW_CMD_REQUEST_F | FW_CMD_READ_F);
+       devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+       ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
+                        &devlog_cmd);
+       if (ret)
+               return ret;
+
+       devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+       dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
+       dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
+       dparams->size = ntohl(devlog_cmd.memsize_devlog);
+
+       return 0;
+}
+
 /**
  *     t4_init_sge_params - initialize adap->params.sge
  *     @adapter: the adapter
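
t4_init_devlog_params() above first tries the PCIE_FW_PF7 register, whose field layout is spelled out by the PCIE_FW_PF_DEVLOG_* macros later in this diff (memtype in bits 0-3, start address divided by 16 in bits 4-27, entry count encoded as multiples of 128 minus one in bits 28-31). A standalone decoder for that encoding, using an arbitrary example register value rather than real hardware contents:

#include <stdio.h>

#define DEVLOG_NENTRIES128_S	28
#define DEVLOG_NENTRIES128_M	0xf
#define DEVLOG_ADDR16_S		4
#define DEVLOG_ADDR16_M		0xffffff
#define DEVLOG_MEMTYPE_S	0
#define DEVLOG_MEMTYPE_M	0xf

int main(void)
{
	unsigned int pf_dparams = 0x70001232;	/* example register contents */

	unsigned int nentries128 =
		(pf_dparams >> DEVLOG_NENTRIES128_S) & DEVLOG_NENTRIES128_M;
	unsigned int addr16 =
		(pf_dparams >> DEVLOG_ADDR16_S) & DEVLOG_ADDR16_M;
	unsigned int memtype =
		(pf_dparams >> DEVLOG_MEMTYPE_S) & DEVLOG_MEMTYPE_M;

	/* 0 encodes 128 entries, 15 encodes 2048 entries */
	unsigned int nentries = (nentries128 + 1) * 128;

	printf("memtype %u, start 0x%x, entries %u\n",
	       memtype, addr16 << 4, nentries);
	return 0;
}
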
index 231a725f6d5d1679c4d6b07ff6694ac387f0b66a..326674b19983825af5631993b4427e0132ed6ba6 100644 (file)
@@ -63,6 +63,8 @@
 #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
 #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
 
+#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+
 #define SGE_PF_KDOORBELL_A 0x0
 
 #define QID_S    15
 #define PFNUM_V(x) ((x) << PFNUM_S)
 
 #define PCIE_FW_A 0x30b8
+#define PCIE_FW_PF_A 0x30bc
 
 #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
 
index 9b353a88cbdab13a7ce85456e393edf197dd1740..a4a19e0ec7f5d76590f0e73641d5e03afbcde43a 100644 (file)
@@ -101,7 +101,7 @@ enum fw_wr_opcodes {
        FW_RI_BIND_MW_WR               = 0x18,
        FW_RI_FR_NSMR_WR               = 0x19,
        FW_RI_INV_LSTAG_WR             = 0x1a,
-       FW_LASTC2E_WR                  = 0x40
+       FW_LASTC2E_WR                  = 0x70
 };
 
 struct fw_wr_hdr {
@@ -993,6 +993,7 @@ enum fw_memtype_cf {
        FW_MEMTYPE_CF_EXTMEM            = 0x2,
        FW_MEMTYPE_CF_FLASH             = 0x4,
        FW_MEMTYPE_CF_INTERNAL          = 0x5,
+       FW_MEMTYPE_CF_EXTMEM1           = 0x6,
 };
 
 struct fw_caps_config_cmd {
@@ -1035,6 +1036,7 @@ enum fw_params_mnem {
        FW_PARAMS_MNEM_PFVF             = 2,    /* function params */
        FW_PARAMS_MNEM_REG              = 3,    /* limited register access */
        FW_PARAMS_MNEM_DMAQ             = 4,    /* dma queue params */
+       FW_PARAMS_MNEM_CHNET            = 5,    /* chnet params */
        FW_PARAMS_MNEM_LAST
 };
 
@@ -3102,7 +3104,8 @@ enum fw_devlog_facility {
        FW_DEVLOG_FACILITY_FCOE         = 0x2E,
        FW_DEVLOG_FACILITY_FOISCSI      = 0x30,
        FW_DEVLOG_FACILITY_FOFCOE       = 0x32,
-       FW_DEVLOG_FACILITY_MAX          = 0x32,
+       FW_DEVLOG_FACILITY_CHNET        = 0x34,
+       FW_DEVLOG_FACILITY_MAX          = 0x34,
 };
 
 /* log message format */
@@ -3139,4 +3142,36 @@ struct fw_devlog_cmd {
        (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
         FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
 
+/* P C I E   F W   P F 7   R E G I S T E R */
+
+/* PF7 stores the Firmware Device Log parameters which allows Host Drivers to
+ * access the "devlog" which needing to contact firmware.  The encoding is
+ * mostly the same as that returned by the DEVLOG command except for the size
+ * which is encoded as the number of entries in multiples-1 of 128 here rather
+ * than the memory size as is done in the DEVLOG command.  Thus, 0 means 128
+ * and 15 means 2048.  This of course in turn constrains the allowed values
+ * for the devlog size ...
+ */
+#define PCIE_FW_PF_DEVLOG              7
+
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_S        28
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_M        0xf
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \
+       ((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S)
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \
+       (((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \
+        PCIE_FW_PF_DEVLOG_NENTRIES128_M)
+
+#define PCIE_FW_PF_DEVLOG_ADDR16_S     4
+#define PCIE_FW_PF_DEVLOG_ADDR16_M     0xffffff
+#define PCIE_FW_PF_DEVLOG_ADDR16_V(x)  ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S)
+#define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \
+       (((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M)
+
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_S    0
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_M    0xf
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S)
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \
+       (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M)
+
 #endif /* _T4FW_INTERFACE_H_ */
index e2bd3f74785851d0db24c282f5a497972bcaf20f..b9d1cbac0eee3c97e76cff147df732601b83f714 100644 (file)
 #define __T4FW_VERSION_H__
 
 #define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0C
-#define T4FW_VERSION_MICRO 0x19
+#define T4FW_VERSION_MINOR 0x0D
+#define T4FW_VERSION_MICRO 0x20
 #define T4FW_VERSION_BUILD 0x00
 
 #define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0C
-#define T5FW_VERSION_MICRO 0x19
+#define T5FW_VERSION_MINOR 0x0D
+#define T5FW_VERSION_MICRO 0x20
 #define T5FW_VERSION_BUILD 0x00
 
 #endif
index 0545f0de1c52be282af53d6a5f03916f93ad7013..e0d711071afb7d6d80763666141477fb3dc4c479 100644 (file)
@@ -1004,7 +1004,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
                                              ? (tq->pidx - 1)
                                              : (tq->size - 1));
                        __be64 *src = (__be64 *)&tq->desc[index];
-                       __be64 __iomem *dst = (__be64 *)(tq->bar2_addr +
+                       __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
                                                         SGE_UDB_WCDOORBELL);
                        unsigned int count = EQ_UNIT / sizeof(__be64);
 
@@ -1018,7 +1018,11 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
                         * DMA.
                         */
                        while (count) {
-                               writeq(*src, dst);
+                               /* the (__force u64) is because the compiler
+                                * doesn't understand the endian swizzling
+                                * going on
+                                */
+                               writeq((__force u64)*src, dst);
                                src++;
                                dst++;
                                count--;
@@ -1252,8 +1256,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
        wr = (void *)&txq->q.desc[txq->q.pidx];
        wr->equiq_to_len16 = cpu_to_be32(wr_mid);
-       wr->r3[0] = cpu_to_be64(0);
-       wr->r3[1] = cpu_to_be64(0);
+       wr->r3[0] = cpu_to_be32(0);
+       wr->r3[1] = cpu_to_be32(0);
        skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
        end = (u64 *)wr + flits;
 
index 1b5506df35b15ab74eaf1ecea220da4ad6278a3f..280b4a21584934f56fbcc24399dfd0cf5feb694e 100644 (file)
@@ -210,10 +210,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
 
                        if (rpl) {
                                /* request bit in high-order BE word */
-                               WARN_ON((be32_to_cpu(*(const u32 *)cmd)
+                               WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
                                         & FW_CMD_REQUEST_F) == 0);
                                get_mbox_rpl(adapter, rpl, size, mbox_data);
-                               WARN_ON((be32_to_cpu(*(u32 *)rpl)
+                               WARN_ON((be32_to_cpu(*(__be32 *)rpl)
                                         & FW_CMD_REQUEST_F) != 0);
                        }
                        t4_write_reg(adapter, mbox_ctl,
@@ -484,7 +484,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
         *  o The BAR2 Queue ID.
         *  o The BAR2 Queue ID Offset into the BAR2 page.
         */
-       bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+       bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
        bar2_qid = qid & qpp_mask;
        bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
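
The added (u64) cast matters because the shift would otherwise be evaluated in 32 bits and silently truncate large BAR2 page offsets. A toy demonstration with made-up queue geometry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* values chosen so the 32-bit intermediate overflows */
        uint32_t qid = 0x40000, qpp_shift = 0, page_shift = 16;

        uint32_t bad  = (qid >> qpp_shift) << page_shift;           /* truncated: 0 */
        uint64_t good = (uint64_t)(qid >> qpp_shift) << page_shift; /* 0x400000000 */

        printf("32-bit result %#x, 64-bit result %#llx\n",
               bad, (unsigned long long)good);
        return 0;
}
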
 
index 27de37aa90afe12b2d564d87c9b13246160d39aa..27b9fe99a9bdfa4eddb70aaa9749eaef6bdd3518 100644 (file)
@@ -354,6 +354,7 @@ struct be_vf_cfg {
        u16 vlan_tag;
        u32 tx_rate;
        u32 plink_tracking;
+       u32 privileges;
 };
 
 enum vf_state {
@@ -423,6 +424,7 @@ struct be_adapter {
 
        u8 __iomem *csr;        /* CSR BAR used only for BE2/3 */
        u8 __iomem *db;         /* Door Bell */
+       u8 __iomem *pcicfg;     /* On SH,BEx only. Shadow of PCI config space */
 
        struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
        struct be_dma_mem mbox_mem;
index 36916cfa70f9a7d31f36b7d80135e61f854a499a..7f05f309e93596778851fb280fa4c8bd068828c3 100644 (file)
@@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
 {
        int num_eqs, i = 0;
 
-       if (lancer_chip(adapter) && num > 8) {
-               while (num) {
-                       num_eqs = min(num, 8);
-                       __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
-                       i += num_eqs;
-                       num -= num_eqs;
-               }
-       } else {
-               __be_cmd_modify_eqd(adapter, set_eqd, num);
+       while (num) {
+               num_eqs = min(num, 8);
+               __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
+               i += num_eqs;
+               num -= num_eqs;
        }
 
        return 0;
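
The simplified loop always issues EQ-delay updates in batches of at most 8, regardless of chip type. A quick sketch of how a request for, say, 20 EQs gets split (the count is arbitrary):

#include <stdio.h>

#define min(a, b)       ((a) < (b) ? (a) : (b))

int main(void)
{
        int num = 20, i = 0;    /* 20 EQs is just an example */

        while (num) {
                int num_eqs = min(num, 8);

                printf("update EQs %d..%d in one command\n", i, i + num_eqs - 1);
                i += num_eqs;
                num -= num_eqs;
        }
        return 0;       /* batches: 0..7, 8..15, 16..19 */
}
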
@@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
 
 /* Uses synchronous mcc */
 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
-                      u32 num)
+                      u32 num, u32 domain)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_vlan_config *req;
@@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
                               wrb, NULL);
+       req->hdr.domain = domain;
 
        req->interface_id = if_id;
        req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
index db761e8e42a3224486ced01238f9fb6b61ad3579..a7634a3f052ac02786baa7e63e764d86262720a9 100644 (file)
@@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
 int be_cmd_get_fw_ver(struct be_adapter *adapter);
 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
-                      u32 num);
+                      u32 num, u32 domain);
 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
index 0a816859aca507c3e762aeebbf6d0601e3529aec..e6b790f0d9dc1ebfe6103a1549e84cf058522027 100644 (file)
@@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter)
        for_each_set_bit(i, adapter->vids, VLAN_N_VID)
                vids[num++] = cpu_to_le16(i);
 
-       status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
+       status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
        if (status) {
                dev_err(dev, "Setting HW VLAN filtering failed\n");
                /* Set to VLAN promisc mode as setting VLAN filter failed */
@@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
        return 0;
 }
 
+static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
+{
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+       u16 vids[BE_NUM_VLANS_SUPPORTED];
+       int vf_if_id = vf_cfg->if_handle;
+       int status;
+
+       /* Enable Transparent VLAN Tagging */
+       status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
+       if (status)
+               return status;
+
+       /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
+       vids[0] = 0;
+       status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
+       if (!status)
+               dev_info(&adapter->pdev->dev,
+                        "Cleared guest VLANs on VF%d", vf);
+
+       /* After TVT is enabled, disallow VFs to program VLAN filters */
+       if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
+               status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
+                                                 ~BE_PRIV_FILTMGMT, vf + 1);
+               if (!status)
+                       vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
+       }
+       return 0;
+}
+
+static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
+{
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+       struct device *dev = &adapter->pdev->dev;
+       int status;
+
+       /* Reset Transparent VLAN Tagging. */
+       status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
+                                      vf_cfg->if_handle, 0);
+       if (status)
+               return status;
+
+       /* Allow VFs to program VLAN filtering */
+       if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
+               status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
+                                                 BE_PRIV_FILTMGMT, vf + 1);
+               if (!status) {
+                       vf_cfg->privileges |= BE_PRIV_FILTMGMT;
+                       dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
+               }
+       }
+
+       dev_info(dev,
+                "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
+       return 0;
+}
+
 static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
-       int status = 0;
+       int status;
 
        if (!sriov_enabled(adapter))
                return -EPERM;
@@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
 
        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
-               if (vf_cfg->vlan_tag != vlan)
-                       status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
-                                                      vf_cfg->if_handle, 0);
+               status = be_set_vf_tvt(adapter, vf, vlan);
        } else {
-               /* Reset Transparent Vlan Tagging. */
-               status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
-                                              vf + 1, vf_cfg->if_handle, 0);
+               status = be_clear_vf_tvt(adapter, vf);
        }
 
        if (status) {
                dev_err(&adapter->pdev->dev,
-                       "VLAN %d config on VF %d failed : %#x\n", vlan,
-                       vf, status);
+                       "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
+                       status);
                return be_cmd_status(status);
        }
 
        vf_cfg->vlan_tag = vlan;
-
        return 0;
 }
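
Before be_set_vf_tvt() is reached, the caller above folds the 802.1p priority into the VLAN tag with vlan |= qos << VLAN_PRIO_SHIFT. A worked example of that encoding (the VLAN ID and priority are made-up values):

#include <stdio.h>

#define VLAN_PRIO_SHIFT 13      /* PCP lives in bits 15:13 of the 802.1Q TCI */

int main(void)
{
        unsigned int vlan = 100;        /* assumed VLAN ID */
        unsigned int qos = 5;           /* assumed 802.1p priority */

        vlan |= qos << VLAN_PRIO_SHIFT;

        /* 100 | (5 << 13) = 0xa064; this combined value is what TVT is set to */
        printf("tci = %#x\n", vlan);
        return 0;
}
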
 
@@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter)
                        }
                }
        } else {
-               pci_read_config_dword(adapter->pdev,
-                                     PCICFG_UE_STATUS_LOW, &ue_lo);
-               pci_read_config_dword(adapter->pdev,
-                                     PCICFG_UE_STATUS_HIGH, &ue_hi);
-               pci_read_config_dword(adapter->pdev,
-                                     PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
-               pci_read_config_dword(adapter->pdev,
-                                     PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
+               ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
+               ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
+               ue_lo_mask = ioread32(adapter->pcicfg +
+                                     PCICFG_UE_STATUS_LOW_MASK);
+               ue_hi_mask = ioread32(adapter->pcicfg +
+                                     PCICFG_UE_STATUS_HI_MASK);
 
                ue_lo = (ue_lo & ~ue_lo_mask);
                ue_hi = (ue_hi & ~ue_hi_mask);
@@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
                        u32 cap_flags, u32 vf)
 {
        u32 en_flags;
-       int status;
 
        en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
@@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
 
        en_flags &= cap_flags;
 
-       status = be_cmd_if_create(adapter, cap_flags, en_flags,
-                                 if_handle, vf);
-
-       return status;
+       return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
 }
 
 static int be_vfs_if_create(struct be_adapter *adapter)
@@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter)
                if (!BE3_chip(adapter)) {
                        status = be_cmd_get_profile_config(adapter, &res,
                                                           vf + 1);
-                       if (!status)
+                       if (!status) {
                                cap_flags = res.if_cap_flags;
+                               /* Prevent VFs from enabling VLAN promiscuous
+                                * mode
+                                */
+                               cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+                       }
                }
 
                status = be_if_create(adapter, &vf_cfg->if_handle,
@@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter)
        struct device *dev = &adapter->pdev->dev;
        struct be_vf_cfg *vf_cfg;
        int status, old_vfs, vf;
-       u32 privileges;
 
        old_vfs = pci_num_vf(adapter->pdev);
 
@@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter)
 
        for_all_vfs(adapter, vf_cfg, vf) {
                /* Allow VFs to program MAC/VLAN filters */
-               status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
-               if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
+               status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
+                                                 vf + 1);
+               if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
                        status = be_cmd_set_fn_privileges(adapter,
-                                                         privileges |
+                                                         vf_cfg->privileges |
                                                          BE_PRIV_FILTMGMT,
                                                          vf + 1);
-                       if (!status)
+                       if (!status) {
+                               vf_cfg->privileges |= BE_PRIV_FILTMGMT;
                                dev_info(dev, "VF%d has FILTMGMT privilege\n",
                                         vf);
+                       }
                }
 
                /* Allow full available bandwidth */
@@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter)
 
 static int be_map_pci_bars(struct be_adapter *adapter)
 {
+       struct pci_dev *pdev = adapter->pdev;
        u8 __iomem *addr;
 
        if (BEx_chip(adapter) && be_physfn(adapter)) {
-               adapter->csr = pci_iomap(adapter->pdev, 2, 0);
+               adapter->csr = pci_iomap(pdev, 2, 0);
                if (!adapter->csr)
                        return -ENOMEM;
        }
 
-       addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
+       addr = pci_iomap(pdev, db_bar(adapter), 0);
        if (!addr)
                goto pci_map_err;
        adapter->db = addr;
 
+       if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
+               if (be_physfn(adapter)) {
+                       /* PCICFG is the 2nd BAR in BE2 */
+                       addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
+                       if (!addr)
+                               goto pci_map_err;
+                       adapter->pcicfg = addr;
+               } else {
+                       adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
+               }
+       }
+
        be_roce_map_pci_bars(adapter);
        return 0;
 
 pci_map_err:
-       dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
+       dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
        be_unmap_pci_bars(adapter);
        return -ENOMEM;
 }
index 78e1ce09b1ab1deadad177472593e020a8f9d3c2..f6a3a7abd468e1f25fd4c33e874a38f0c85bc4dd 100644 (file)
@@ -1954,6 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct device_node *node;
        int err = -ENXIO, i;
+       u32 mii_speed, holdtime;
 
        /*
         * The i.MX28 dual fec interfaces are not equal.
@@ -1991,10 +1992,33 @@ static int fec_enet_mii_init(struct platform_device *pdev)
         * Reference Manual has an error on this, and gets fixed on i.MX6Q
         * document.
         */
-       fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
+       mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
        if (fep->quirks & FEC_QUIRK_ENET_MAC)
-               fep->phy_speed--;
-       fep->phy_speed <<= 1;
+               mii_speed--;
+       if (mii_speed > 63) {
+               dev_err(&pdev->dev,
+                       "fec clock (%lu) too fast to get right mii speed\n",
+                       clk_get_rate(fep->clk_ipg));
+               err = -EINVAL;
+               goto err_out;
+       }
+
+       /*
+        * The i.MX28 and i.MX6 types have another field in the MSCR (aka
+        * MII_SPEED) register that defines the MDIO output hold time. Earlier
+        * versions are RAZ there, so just ignore the difference and write the
+        * register always.
+        * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
+        * HOLDTIME + 1 is the number of clk cycles the fec is holding the
+        * output.
+        * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
+        * Given that ceil(clkrate / 5000000) <= 64, the calculation for
+        * holdtime cannot result in a value greater than 3.
+        */
+       holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
+
+       fep->phy_speed = mii_speed << 1 | holdtime << 8;
+
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
        fep->mii_bus = mdiobus_alloc();
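
A worked instance of the MSCR arithmetic described in the comment above, assuming a 66 MHz ipg clock and FEC_QUIRK_ENET_MAC set (both assumptions, purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long clk_rate = 66000000UL;    /* assumed ipg clock */

        /* quirky MACs (FEC_QUIRK_ENET_MAC) subtract one from the divider */
        uint32_t mii_speed = DIV_ROUND_UP(clk_rate, 5000000) - 1;   /* 14 - 1 = 13 */
        uint32_t holdtime = DIV_ROUND_UP(clk_rate, 100000000) - 1;  /* 1 - 1 = 0   */
        uint32_t phy_speed = mii_speed << 1 | holdtime << 8;

        printf("MSCR = %#x\n", phy_speed);      /* 0x1a */
        return 0;
}
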
index 357e8b576905a4a61c312e6c9736c087b706b69d..56b774d3a13d4c3856ad01fff0ebe67c128e4f65 100644 (file)
@@ -3893,6 +3893,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
        ugeth->phy_interface = phy_interface;
        ugeth->max_speed = max_speed;
 
+       /* Carrier starts down, phylib will bring it up */
+       netif_carrier_off(dev);
+
        err = register_netdev(dev);
        if (err) {
                if (netif_msg_probe(ugeth))
index a681d7c0bb9f066f8d48ed068f68f0001dad8d0f..3350721bf515ea1d3c48df1592cddbf837c47946 100644 (file)
@@ -1993,7 +1993,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
                        goto reset_slave;
                slave_state[slave].vhcr_dma = ((u64) param) << 48;
                priv->mfunc.master.slave_state[slave].cookie = 0;
-               mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
                break;
        case MLX4_COMM_CMD_VHCR1:
                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
@@ -2225,6 +2224,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                for (i = 0; i < dev->num_slaves; ++i) {
                        s_state = &priv->mfunc.master.slave_state[i];
                        s_state->last_cmd = MLX4_COMM_CMD_RESET;
+                       mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
                        for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
                                s_state->event_eq[j].eqn = -1;
                        __raw_writel((__force u32) 0,
index ebce5bb24df98b77eec38a1fc6c720987768934b..3485acf03014c7e4df64fa94a770fce00abe5a35 100644 (file)
@@ -2805,13 +2805,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        netif_carrier_off(dev);
        mlx4_en_set_default_moderation(priv);
 
-       err = register_netdev(dev);
-       if (err) {
-               en_err(priv, "Netdev registration failed for port %d\n", port);
-               goto out;
-       }
-       priv->registered = 1;
-
        en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
        en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
@@ -2853,6 +2846,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 
        mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
 
+       err = register_netdev(dev);
+       if (err) {
+               en_err(priv, "Netdev registration failed for port %d\n", port);
+               goto out;
+       }
+
+       priv->registered = 1;
+
        return 0;
 
 out:
index 264bc15c1ff212ad649c3547c8a8ffc0b2c22e43..6e70ffee8e87ee2fa9957adeb5994ca346bf155b 100644 (file)
@@ -153,12 +153,10 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
 
                /* All active slaves need to receive the event */
                if (slave == ALL_SLAVES) {
-                       for (i = 0; i < dev->num_slaves; i++) {
-                               if (i != dev->caps.function &&
-                                   master->slave_state[i].active)
-                                       if (mlx4_GEN_EQE(dev, i, eqe))
-                                               mlx4_warn(dev, "Failed to generate event for slave %d\n",
-                                                         i);
+                       for (i = 0; i <= dev->persist->num_vfs; i++) {
+                               if (mlx4_GEN_EQE(dev, i, eqe))
+                                       mlx4_warn(dev, "Failed to generate event for slave %d\n",
+                                                 i);
                        }
                } else {
                        if (mlx4_GEN_EQE(dev, slave, eqe))
@@ -203,13 +201,11 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
                             struct mlx4_eqe *eqe)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_slave_state *s_slave =
-               &priv->mfunc.master.slave_state[slave];
 
-       if (!s_slave->active) {
-               /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+       if (slave < 0 || slave > dev->persist->num_vfs ||
+           slave == dev->caps.function ||
+           !priv->mfunc.master.slave_state[slave].active)
                return;
-       }
 
        slave_event(dev, slave, eqe);
 }
index d97ca88c55b59e039af66991e4ad413f82984158..6e413ac4e94011c4ea85993b8471d5adfc84d5d3 100644 (file)
@@ -3095,6 +3095,12 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
        if (!priv->mfunc.master.slave_state)
                return -EINVAL;
 
+       /* check for slave valid, slave not PF, and slave active */
+       if (slave < 0 || slave > dev->persist->num_vfs ||
+           slave == dev->caps.function ||
+           !priv->mfunc.master.slave_state[slave].active)
+               return 0;
+
        event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
 
        /* Create the event only if the slave is registered */
index 9fb6948e14c64ef424c032811364aaefc608d4cb..5cecec282aba8b471b60b91af594ba6374edcda7 100644 (file)
@@ -4468,10 +4468,16 @@ static int rocker_port_master_changed(struct net_device *dev)
        struct net_device *master = netdev_master_upper_dev_get(dev);
        int err = 0;
 
+       /* There are currently three cases handled here:
+        * 1. Joining a bridge
+        * 2. Leaving a previously joined bridge
+        * 3. Other, e.g. being added to or removed from a bond or openvswitch,
+        *    in which case nothing is done
+        */
        if (master && master->rtnl_link_ops &&
            !strcmp(master->rtnl_link_ops->kind, "bridge"))
                err = rocker_port_bridge_join(rocker_port, master);
-       else
+       else if (rocker_port_is_bridged(rocker_port))
                err = rocker_port_bridge_leave(rocker_port);
 
        return err;
index 924ea98bd5311b1c7197a9fe4e9b475b095069ce..54549a6223dd2f47f493ae112a06f12f3337120f 100644 (file)
@@ -114,7 +114,9 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr);
 rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
 int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
 void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
-bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6);
+struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
+                                  const void *iaddr, bool is_v6);
+bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
 struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
                                        const void *iaddr, bool is_v6);
 void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
index 2a175006028b347efe1fbe6d145f311f78c80bd7..b7877a194cfe430469af8031c58efdf307eafc4f 100644 (file)
@@ -81,19 +81,20 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
        hash = (addr->atype == IPVL_IPV6) ?
               ipvlan_get_v6_hash(&addr->ip6addr) :
               ipvlan_get_v4_hash(&addr->ip4addr);
-       hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
+       if (hlist_unhashed(&addr->hlnode))
+               hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
 }
 
 void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
 {
-       hlist_del_rcu(&addr->hlnode);
+       hlist_del_init_rcu(&addr->hlnode);
        if (sync)
                synchronize_rcu();
 }
 
-bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
+struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
+                                  const void *iaddr, bool is_v6)
 {
-       struct ipvl_port *port = ipvlan->port;
        struct ipvl_addr *addr;
 
        list_for_each_entry(addr, &ipvlan->addrs, anode) {
@@ -101,12 +102,21 @@ bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
                    ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
                    (!is_v6 && addr->atype == IPVL_IPV4 &&
                    addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
-                       return true;
+                       return addr;
        }
+       return NULL;
+}
+
+bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
+{
+       struct ipvl_dev *ipvlan;
 
-       if (ipvlan_ht_addr_lookup(port, iaddr, is_v6))
-               return true;
+       ASSERT_RTNL();
 
+       list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+               if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
+                       return true;
+       }
        return false;
 }
 
@@ -192,7 +202,8 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
        if (skb->protocol == htons(ETH_P_PAUSE))
                return;
 
-       list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
                if (local && (ipvlan == in_dev))
                        continue;
 
@@ -219,6 +230,7 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
 mcast_acct:
                ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
        }
+       rcu_read_unlock();
 
        /* Locally generated? ...Forward a copy to the main-device as
         * well. On the RX side we'll ignore it (won't give it to any
index 4f4099d5603d0b64f2a15b66e86dbee90260cead..4fa14208d79931df1b5d8707744e93043b951424 100644 (file)
@@ -505,7 +505,7 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
        if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
                list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
                        ipvlan_ht_addr_del(addr, !dev->dismantle);
-                       list_del_rcu(&addr->anode);
+                       list_del(&addr->anode);
                }
        }
        list_del_rcu(&ipvlan->pnode);
@@ -607,7 +607,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
 {
        struct ipvl_addr *addr;
 
-       if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) {
+       if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
                netif_err(ipvlan, ifup, ipvlan->dev,
                          "Failed to add IPv6=%pI6c addr for %s intf\n",
                          ip6_addr, ipvlan->dev->name);
@@ -620,9 +620,13 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
        addr->master = ipvlan;
        memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
        addr->atype = IPVL_IPV6;
-       list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+       list_add_tail(&addr->anode, &ipvlan->addrs);
        ipvlan->ipv6cnt++;
-       ipvlan_ht_addr_add(ipvlan, addr);
+       /* If the interface is not up, the address will be added to the hash
+        * list by ipvlan_open.
+        */
+       if (netif_running(ipvlan->dev))
+               ipvlan_ht_addr_add(ipvlan, addr);
 
        return 0;
 }
@@ -631,12 +635,12 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
 {
        struct ipvl_addr *addr;
 
-       addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true);
+       addr = ipvlan_find_addr(ipvlan, ip6_addr, true);
        if (!addr)
                return;
 
        ipvlan_ht_addr_del(addr, true);
-       list_del_rcu(&addr->anode);
+       list_del(&addr->anode);
        ipvlan->ipv6cnt--;
        WARN_ON(ipvlan->ipv6cnt < 0);
        kfree_rcu(addr, rcu);
@@ -675,7 +679,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
 {
        struct ipvl_addr *addr;
 
-       if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) {
+       if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
                netif_err(ipvlan, ifup, ipvlan->dev,
                          "Failed to add IPv4=%pI4 on %s intf.\n",
                          ip4_addr, ipvlan->dev->name);
@@ -688,9 +692,13 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
        addr->master = ipvlan;
        memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
        addr->atype = IPVL_IPV4;
-       list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+       list_add_tail(&addr->anode, &ipvlan->addrs);
        ipvlan->ipv4cnt++;
-       ipvlan_ht_addr_add(ipvlan, addr);
+       /* If the interface is not up, the address will be added to the hash
+        * list by ipvlan_open.
+        */
+       if (netif_running(ipvlan->dev))
+               ipvlan_ht_addr_add(ipvlan, addr);
        ipvlan_set_broadcast_mac_filter(ipvlan, true);
 
        return 0;
@@ -700,12 +708,12 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
 {
        struct ipvl_addr *addr;
 
-       addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false);
+       addr = ipvlan_find_addr(ipvlan, ip4_addr, false);
        if (!addr)
                return;
 
        ipvlan_ht_addr_del(addr, true);
-       list_del_rcu(&addr->anode);
+       list_del(&addr->anode);
        ipvlan->ipv4cnt--;
        WARN_ON(ipvlan->ipv4cnt < 0);
        if (!ipvlan->ipv4cnt)
index 5c55f11572baa0a3e0dd3877ae886935cb8a6eb9..75d6f26729a30e34cdaaf8334a9cc0aaa2a01c82 100644 (file)
@@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
                skb_put(skb, sizeof(padbytes));
        }
+
+       usbnet_set_skb_tx_stats(skb, 1, 0);
        return skb;
 }
 
index 9311a08565bed17cf5082a21b9c6dca7ab02dfe1..4545e78840b0d9dc80ae4392b36cfcf588f17c3a 100644 (file)
@@ -522,6 +522,7 @@ static const struct driver_info wwan_info = {
 #define DELL_VENDOR_ID         0x413C
 #define REALTEK_VENDOR_ID      0x0bda
 #define SAMSUNG_VENDOR_ID      0x04e8
+#define LENOVO_VENDOR_ID       0x17ef
 
 static const struct usb_device_id      products[] = {
 /* BLACKLIST !!
@@ -702,6 +703,13 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
index 80a844e0ae0383303d8fa4a6d4147fc99337f268..c3e4da9e79ca071a06082e965a3aec5bb206a77e 100644 (file)
@@ -1172,17 +1172,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 
        /* return skb */
        ctx->tx_curr_skb = NULL;
-       dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
 
        /* keep private stats: framing overhead and number of NTBs */
        ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
        ctx->tx_ntbs++;
 
-       /* usbnet has already counted all the framing overhead.
+       /* usbnet will count all the framing overhead by default.
         * Adjust the stats so that the tx_bytes counter shows real
         * payload data instead.
         */
-       dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
+       usbnet_set_skb_tx_stats(skb_out, n,
+                               ctx->tx_curr_frame_payload - skb_out->len);
 
        return skb_out;
 
index fe48f4c513730fa485aaf31dcf8be8ed119736f8..1762ad3910b2e75b55db894afee887f055977b22 100644 (file)
@@ -46,8 +46,7 @@ enum cx82310_status {
 };
 
 #define CMD_PACKET_SIZE        64
-/* first command after power on can take around 8 seconds */
-#define CMD_TIMEOUT    15000
+#define CMD_TIMEOUT    100
 #define CMD_REPLY_RETRY 5
 
 #define CX82310_MTU    1514
@@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
        ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
                           CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
        if (ret < 0) {
-               dev_err(&dev->udev->dev, "send command %#x: error %d\n",
-                       cmd, ret);
+               if (cmd != CMD_GET_LINK_STATUS)
+                       dev_err(&dev->udev->dev, "send command %#x: error %d\n",
+                               cmd, ret);
                goto end;
        }
 
@@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
                                           buf, CMD_PACKET_SIZE, &actual_len,
                                           CMD_TIMEOUT);
                        if (ret < 0) {
-                               dev_err(&dev->udev->dev,
-                                       "reply receive error %d\n", ret);
+                               if (cmd != CMD_GET_LINK_STATUS)
+                                       dev_err(&dev->udev->dev,
+                                               "reply receive error %d\n",
+                                               ret);
                                goto end;
                        }
                        if (actual_len > 0)
@@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
        int ret;
        char buf[15];
        struct usb_device *udev = dev->udev;
+       u8 link[3];
+       int timeout = 50;
 
        /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
        if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
        if (!dev->partial_data)
                return -ENOMEM;
 
+       /* wait for firmware to become ready (indicated by the link being up) */
+       while (--timeout) {
+               ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0,
+                                 link, sizeof(link));
+               /* the command can time out during boot - it's not an error */
+               if (!ret && link[0] == 1 && link[2] == 1)
+                       break;
+               msleep(500);
+       }
+       if (!timeout) {
+               dev_err(&udev->dev, "firmware not ready in time\n");
+               return -ETIMEDOUT;
+       }
+
        /* enable ethernet mode (?) */
        ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
        if (ret) {
index 438fc6bcaef15538e9c151c46fce08dc67cec1a5..9f7c0ab3b3490b947d161428406612c80dc29360 100644 (file)
@@ -492,6 +492,7 @@ enum rtl8152_flags {
 /* Define these values to match your device */
 #define VENDOR_ID_REALTEK              0x0bda
 #define VENDOR_ID_SAMSUNG              0x04e8
+#define VENDOR_ID_LENOVO               0x17ef
 
 #define MCU_TYPE_PLA                   0x0100
 #define MCU_TYPE_USB                   0x0000
@@ -4037,6 +4038,7 @@ static struct usb_device_id rtl8152_table[] = {
        {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
        {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
        {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
        {}
 };
 
index b94a0fbb8b3b5a74ed466c4d5a66e25f4f2e0edc..953de13267df19d6fcefe60c2008f6222ca5775c 100644 (file)
@@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                skb_put(skb, sizeof(padbytes));
        }
 
+       usbnet_set_skb_tx_stats(skb, 1, 0);
        return skb;
 }
 
index 449835f4331e210daad6c8717430341c1c6996ef..777757ae19732ab10ab2283645464a87a02c7b20 100644 (file)
@@ -1188,8 +1188,7 @@ static void tx_complete (struct urb *urb)
        struct usbnet           *dev = entry->dev;
 
        if (urb->status == 0) {
-               if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
-                       dev->net->stats.tx_packets++;
+               dev->net->stats.tx_packets += entry->packets;
                dev->net->stats.tx_bytes += entry->length;
        } else {
                dev->net->stats.tx_errors++;
@@ -1347,7 +1346,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
                } else
                        urb->transfer_flags |= URB_ZERO_PACKET;
        }
-       entry->length = urb->transfer_buffer_length = length;
+       urb->transfer_buffer_length = length;
+
+       if (info->flags & FLAG_MULTI_PACKET) {
+               /* Driver has set number of packets and a length delta.
+                * Calculate the complete length and ensure that it's
+                * positive.
+                */
+               entry->length += length;
+               if (WARN_ON_ONCE(entry->length <= 0))
+                       entry->length = length;
+       } else {
+               usbnet_set_skb_tx_stats(skb, 1, length);
+       }
 
        spin_lock_irqsave(&dev->txq.lock, flags);
        retval = usb_autopm_get_interface_async(dev->intf);
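
For FLAG_MULTI_PACKET drivers such as cdc_ncm above, the per-skb delta recorded by usbnet_set_skb_tx_stats() and the final transfer length added here cancel out to the real payload size, so tx_bytes no longer counts NTB framing. A toy calculation with assumed sizes:

#include <stdio.h>

int main(void)
{
        long payload = 2800;    /* assumed: Ethernet payload inside the NTB */
        long frame_len = 3072;  /* assumed: NTB length actually sent on the wire */

        /* what the driver records via usbnet_set_skb_tx_stats(skb, n, delta) */
        long delta = payload - frame_len;               /* -272 */

        /* what the core then adds: the final transfer length */
        long entry_length = delta + frame_len;          /* 2800 */

        printf("tx_bytes credited per NTB: %ld\n", entry_length);
        return 0;
}
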
index cb366adc820b17a667a908d79db82da8519f3783..f50a6bc5d06ee0b2bb3c83c971cd5f1d81afd690 100644 (file)
@@ -219,12 +219,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
        struct ath_buf *bf = avp->av_bcbuf;
+       struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
 
        ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
                avp->av_bslot);
 
        tasklet_disable(&sc->bcon_tasklet);
 
+       cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+
        if (bf && bf->bf_mpdu) {
                struct sk_buff *skb = bf->bf_mpdu;
                dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -521,8 +524,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
        }
 
        if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
-               if ((vif->type != NL80211_IFTYPE_AP) ||
-                   (sc->nbcnvifs > 1)) {
+               if (vif->type != NL80211_IFTYPE_AP) {
                        ath_dbg(common, CONFIG,
                                "An AP interface is already present !\n");
                        return false;
@@ -616,12 +618,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
         * enabling/disabling SWBA.
         */
        if (changed & BSS_CHANGED_BEACON_ENABLED) {
-               if (!bss_conf->enable_beacon &&
-                   (sc->nbcnvifs <= 1)) {
-                       cur_conf->enable_beacon = false;
-               } else if (bss_conf->enable_beacon) {
-                       cur_conf->enable_beacon = true;
-                       ath9k_cache_beacon_config(sc, ctx, bss_conf);
+               bool enabled = cur_conf->enable_beacon;
+
+               if (!bss_conf->enable_beacon) {
+                       cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+               } else {
+                       cur_conf->enable_beacon |= BIT(avp->av_bslot);
+                       if (!enabled)
+                               ath9k_cache_beacon_config(sc, ctx, bss_conf);
                }
        }
 
index 2b79a568e8032c1fbc33a0fb550a5da6140f2865..d23737342f4fe7b311c84a7d556cad6e94ecf1f1 100644 (file)
@@ -54,7 +54,7 @@ struct ath_beacon_config {
        u16 dtim_period;
        u16 bmiss_timeout;
        u8 dtim_count;
-       bool enable_beacon;
+       u8 enable_beacon;
        bool ibss_creator;
        u32 nexttbtt;
        u32 intval;
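
With enable_beacon now a per-slot bitmap instead of a bool, removing one beaconing interface clears only its own bit and the remaining slots keep beaconing. A minimal sketch of that bookkeeping (slot numbers are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)  (1u << (n))

int main(void)
{
        uint8_t enable_beacon = 0;      /* one bit per beacon slot */

        enable_beacon |= BIT(0);        /* first AP vif starts beaconing */
        enable_beacon |= BIT(2);        /* a second vif in slot 2 */
        enable_beacon &= ~BIT(0);       /* slot 0 removed; slot 2 keeps beaconing */

        printf("any beacons enabled: %s\n", enable_beacon ? "yes" : "no");
        return 0;
}
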
index 60aa8d71e753fa936909dcc98ef915c2a2208f60..8529014e1a5e1c1b4637abc1625fd177b407f602 100644 (file)
@@ -424,7 +424,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
        ah->power_mode = ATH9K_PM_UNDEFINED;
        ah->htc_reset_init = true;
 
-       ah->tpc_enabled = true;
+       ah->tpc_enabled = false;
 
        ah->ani_function = ATH9K_ANI_ALL;
        if (!AR_SREV_9300_20_OR_LATER(ah))
index defb7a44e0bc1ff554195890dcccc31bfb0c9769..7748a1ccf14fdf4a6b2864441e9b041da35558a6 100644 (file)
@@ -126,7 +126,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
        brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
        if (drvr->bus_if->wowl_supported)
                brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
-       brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
+       if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
+               brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
 
        /* set chip related quirks */
        switch (drvr->bus_if->chip) {
index a6f22c32a27994000f578c2baff08a2f62042aa0..3811878ab9cd2057ad44785c7f3dad5cb5edcf07 100644 (file)
@@ -708,7 +708,6 @@ struct iwl_priv {
        unsigned long reload_jiffies;
        int reload_count;
        bool ucode_loaded;
-       bool init_ucode_run;            /* Don't run init uCode again */
 
        u8 plcp_delta_threshold;
 
index 47e64e8b9517d97dff6b96e544a731d498412ba1..cceb026e0793b45fa418e7c023855d5c0bd23a02 100644 (file)
@@ -1114,16 +1114,17 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
                        BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
 
-       if (vif)
-               scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
-
-       IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues);
-       if (iwlagn_txfifo_flush(priv, scd_queues)) {
-               IWL_ERR(priv, "flush request fail\n");
-               goto done;
+       if (drop) {
+               IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n",
+                                   scd_queues);
+               if (iwlagn_txfifo_flush(priv, scd_queues)) {
+                       IWL_ERR(priv, "flush request fail\n");
+                       goto done;
+               }
        }
+
        IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
-       iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
+       iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues);
 done:
        mutex_unlock(&priv->mutex);
        IWL_DEBUG_MAC80211(priv, "leave\n");
index 4dbef7e58c2e3dfba5be41c1f2f4cfe32c04727b..5244e43bfafbc4617720097660ec693f5d30742f 100644 (file)
@@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
        if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
                return 0;
 
-       if (priv->init_ucode_run)
-               return 0;
-
        iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
                                   calib_complete, ARRAY_SIZE(calib_complete),
                                   iwlagn_wait_calib, priv);
@@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
         */
        ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
                                        UCODE_CALIB_TIMEOUT);
-       if (!ret)
-               priv->init_ucode_run = true;
 
        goto out;
 
index 996e7f16adf9feafc50cb5d56596a2b80e0cafb3..c7154ac42c8c366d093cfb462a46d9ac98c01e02 100644 (file)
@@ -1257,6 +1257,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                                op->name, err);
 #endif
        }
+       kfree(pieces);
        return;
 
  try_again:
index efa9688a4cf11150f6d1e92c71e0a367d90fea87..078f24cf4af3927c66e2540bc6cc6404d0c36fc7 100644 (file)
@@ -1278,6 +1278,9 @@ static void rs_mac80211_tx_status(void *mvm_r,
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
+       if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+               return;
+
        if (!ieee80211_is_data(hdr->frame_control) ||
            info->flags & IEEE80211_TX_CTL_NO_ACK)
                return;
@@ -2511,6 +2514,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_lq_sta *lq_sta = mvm_sta;
 
+       if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
+               /* if vif isn't initialized mvm doesn't know about
+                * this station, so don't do anything with it
+                */
+               sta = NULL;
+               mvm_sta = NULL;
+       }
+
        /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
 
        /* Treat uninitialized rate scaling data same as non-existing. */
@@ -2827,6 +2838,9 @@ static void rs_rate_update(void *mvm_r,
                        (struct iwl_op_mode *)mvm_r;
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
+       if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+               return;
+
        /* Stop any ongoing aggregations as rs starts off assuming no agg */
        for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
                ieee80211_stop_tx_ba_session(sta, tid);
@@ -3587,9 +3601,15 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
 
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
 
-static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
+static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
 {
-       struct iwl_lq_sta *lq_sta = mvm_sta;
+       struct iwl_lq_sta *lq_sta = priv_sta;
+       struct iwl_mvm_sta *mvmsta;
+
+       mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+
+       if (!mvmsta->vif)
+               return;
 
        debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
                            lq_sta, &rs_sta_dbgfs_scale_table_ops);
index f8d6f306dd76d276b82056c9fb1a74956e78ac3c..4b81c0bf63b0a86173afde87ef2822c0dfa8860f 100644 (file)
@@ -197,6 +197,8 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
                             struct iwl_time_event_notif *notif)
 {
        if (!le32_to_cpu(notif->status)) {
+               if (te_data->vif->type == NL80211_IFTYPE_STATION)
+                       ieee80211_connection_loss(te_data->vif);
                IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
                iwl_mvm_te_clear_data(mvm, te_data);
                return;
index 07304e1fd64aa70b41d4c9a18762b48924a6f250..96a05406babf864fb0ff463e9b87dc4c27ba5299 100644 (file)
@@ -949,8 +949,10 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        tid_data = &mvmsta->tid_data[tid];
 
-       if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d",
-                     tid_data->txq_id, tid, scd_flow)) {
+       if (tid_data->txq_id != scd_flow) {
+               IWL_ERR(mvm,
+                       "invalid BA notification: Q %d, tid %d, flow %d\n",
+                       tid_data->txq_id, tid, scd_flow);
                rcu_read_unlock();
                return 0;
        }
index dbd6bcf5220563c03727bccde83083032b977ff8..686dd301cd536b68d616ea0507a3e1742e12240c 100644 (file)
@@ -368,10 +368,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 /* 3165 Series */
        {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
 
 /* 7265 Series */
        {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
index a62170ea04818e37790eeec99fc2047e255b030f..8c45cf44ce24bac6393902ae2daf708b0ce11e6a 100644 (file)
@@ -1124,12 +1124,22 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
        /*This is for new trx flow*/
        struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
        u8 temp_one = 1;
+       u8 *entry;
 
        memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
        ring = &rtlpci->tx_ring[BEACON_QUEUE];
        pskb = __skb_dequeue(&ring->queue);
-       if (pskb)
+       if (rtlpriv->use_new_trx_flow)
+               entry = (u8 *)(&ring->buffer_desc[ring->idx]);
+       else
+               entry = (u8 *)(&ring->desc[ring->idx]);
+       if (pskb) {
+               pci_unmap_single(rtlpci->pdev,
+                                rtlpriv->cfg->ops->get_desc(
+                                (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
+                                pskb->len, PCI_DMA_TODEVICE);
                kfree_skb(pskb);
+       }
 
        /*NB: the beacon data buffer must be 32-bit aligned. */
        pskb = ieee80211_beacon_get(hw, mac->vif);
index ad2906919d4589f4edbc0cd599a2fe7aa2938922..78a7dcbec7d8990ac37adad938a0aff3420423e2 100644 (file)
@@ -450,12 +450,17 @@ static struct of_bus *of_match_bus(struct device_node *np)
        return NULL;
 }
 
-static int of_empty_ranges_quirk(void)
+static int of_empty_ranges_quirk(struct device_node *np)
 {
        if (IS_ENABLED(CONFIG_PPC)) {
-               /* To save cycles, we cache the result */
+               /* To save cycles, we cache the result of the global "Mac" check */
                static int quirk_state = -1;
 
+               /* PA-SEMI sdc DT bug */
+               if (of_device_is_compatible(np, "1682m-sdc"))
+                       return true;
+
+               /* Make quirk cached */
                if (quirk_state < 0)
                        quirk_state =
                                of_machine_is_compatible("Power Macintosh") ||
@@ -490,7 +495,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
         * This code is only enabled on powerpc. --gcl
         */
        ranges = of_get_property(parent, rprop, &rlen);
-       if (ranges == NULL && !of_empty_ranges_quirk()) {
+       if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
                pr_debug("OF: no ranges; cannot translate\n");
                return 1;
        }
index 9205f433573cc124bd84701f6ee6324969b093e9..18198316b6cf15c406c6f9eb243bc39601b97979 100644 (file)
@@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev)
        if (!pmic)
                return -ENOMEM;
 
+       if (of_device_is_compatible(node, "ti,tps659038-pmic"))
+               palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr =
+                                                       TPS659038_REGEN2_CTRL;
+
        pmic->dev = &pdev->dev;
        pmic->palmas = palmas;
        palmas->pmic = pmic;
index e2436d140175a109907e55eacafeb1906105748d..3a6fd3a8a2ec63d389a2f4dbf948f5c2090ecc3a 100644 (file)
@@ -413,8 +413,8 @@ static void rtc_mrst_do_remove(struct device *dev)
        mrst->dev = NULL;
 }
 
-#ifdef CONFIG_PM
-static int mrst_suspend(struct device *dev, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int mrst_suspend(struct device *dev)
 {
        struct mrst_rtc *mrst = dev_get_drvdata(dev);
        unsigned char   tmp;
@@ -453,7 +453,7 @@ static int mrst_suspend(struct device *dev, pm_message_t mesg)
  */
 static inline int mrst_poweroff(struct device *dev)
 {
-       return mrst_suspend(dev, PMSG_HIBERNATE);
+       return mrst_suspend(dev);
 }
 
 static int mrst_resume(struct device *dev)
@@ -490,9 +490,11 @@ static int mrst_resume(struct device *dev)
        return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume);
+#define MRST_PM_OPS (&mrst_pm_ops)
+
 #else
-#define        mrst_suspend    NULL
-#define        mrst_resume     NULL
+#define MRST_PM_OPS NULL
 
 static inline int mrst_poweroff(struct device *dev)
 {
@@ -529,9 +531,8 @@ static struct platform_driver vrtc_mrst_platform_driver = {
        .remove         = vrtc_mrst_platform_remove,
        .shutdown       = vrtc_mrst_platform_shutdown,
        .driver = {
-               .name           = (char *) driver_name,
-               .suspend        = mrst_suspend,
-               .resume         = mrst_resume,
+               .name   = driver_name,
+               .pm     = MRST_PM_OPS,
        }
 };
 
index 9219953ee949a9dfaf1ff0f044e41a3e5c13adc7..d9afc51af7d3dbc7df2e38865596ba3473a1f708 100644 (file)
@@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = {
 };
 
 static struct ata_port_info sata_port_info = {
-       .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+       .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+                         ATA_FLAG_SAS_HOST,
        .pio_mask       = ATA_PIO4_ONLY,
        .mwdma_mask     = ATA_MWDMA2,
        .udma_mask      = ATA_UDMA6,
index 932d9cc98d2fc807c4ef0e7715fe922803bbc29c..9c706d8c144174dae8cf87c1aa62a95a98f961e4 100644 (file)
@@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = {
 };
 
 static struct ata_port_info sata_port_info = {
-       .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
+       .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
+                ATA_FLAG_SAS_HOST,
        .pio_mask = ATA_PIO4,
        .mwdma_mask = ATA_MWDMA2,
        .udma_mask = ATA_UDMA6,
index 3ce39d10fafbc270d47a17107b7cc5ce257dca94..4f8c798e0633a81483c3b05af46805b1848ba386 100644 (file)
@@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg)
 {
        struct dw_spi *dws = arg;
 
-       if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
+       clear_bit(TX_BUSY, &dws->dma_chan_busy);
+       if (test_bit(RX_BUSY, &dws->dma_chan_busy))
                return;
        dw_spi_xfer_done(dws);
 }
@@ -156,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg)
 {
        struct dw_spi *dws = arg;
 
-       if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
+       clear_bit(RX_BUSY, &dws->dma_chan_busy);
+       if (test_bit(TX_BUSY, &dws->dma_chan_busy))
                return;
        dw_spi_xfer_done(dws);
 }
index ff9cdbdb6672371df54b6c59ce54579a46758602..2b2c359f5a501da32a38af38cfe0c0956b0e23c9 100644 (file)
@@ -498,7 +498,7 @@ static int spi_qup_probe(struct platform_device *pdev)
        struct resource *res;
        struct device *dev;
        void __iomem *base;
-       u32 max_freq, iomode;
+       u32 max_freq, iomode, num_cs;
        int ret, irq, size;
 
        dev = &pdev->dev;
@@ -550,10 +550,11 @@ static int spi_qup_probe(struct platform_device *pdev)
        }
 
        /* use num-cs unless not present or out of range */
-       if (of_property_read_u16(dev->of_node, "num-cs",
-                       &master->num_chipselect) ||
-                       (master->num_chipselect > SPI_NUM_CHIPSELECTS))
+       if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
+           num_cs > SPI_NUM_CHIPSELECTS)
                master->num_chipselect = SPI_NUM_CHIPSELECTS;
+       else
+               master->num_chipselect = num_cs;
 
        master->bus_num = pdev->id;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
index c64a3e59fce30a7f9658afcca246a6d6872627da..57a195041dc72e019a02b3c7c4b6f4c2a6fac59b 100644 (file)
@@ -1105,13 +1105,14 @@ void spi_finalize_current_message(struct spi_master *master)
                                "failed to unprepare message: %d\n", ret);
                }
        }
+
+       trace_spi_message_done(mesg);
+
        master->cur_msg_prepared = false;
 
        mesg->state = NULL;
        if (mesg->complete)
                mesg->complete(mesg->context);
-
-       trace_spi_message_done(mesg);
 }
 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
 
index 75b3603906c1457dc94e915ee84bb681d087029e..f0d22cdb51cd8eee442b53c259aa7ee355443c4b 100644 (file)
@@ -130,6 +130,7 @@ config SSB_DRIVER_MIPS
        bool "SSB Broadcom MIPS core driver"
        depends on SSB && MIPS
        select SSB_SERIAL
+       select SSB_SFLASH
        help
          Driver for the Sonics Silicon Backplane attached
          Broadcom MIPS core.
index 24183028bd712b11af46cd4531f60b33b4e57338..6d5b38d6957852ee81caa09cb6e3c047c55d26b8 100644 (file)
@@ -38,6 +38,7 @@ config IIO_SIMPLE_DUMMY_EVENTS
 config IIO_SIMPLE_DUMMY_BUFFER
        bool "Buffered capture support"
        select IIO_BUFFER
+       select IIO_TRIGGER
        select IIO_KFIFO_BUF
        help
          Add buffered data capture to the simple dummy driver.
index fd171d8b38fbcc444f3e7118bb66b2d51bdf698c..90cc18b703cf67ae6c089c36d51d8ebde6f28106 100644 (file)
@@ -592,6 +592,7 @@ int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
        mutex_init(&data->lock);
 
        indio_dev->dev.parent = dev;
+       indio_dev->name = dev->driver->name;
        indio_dev->info = &hmc5843_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->channels = data->variant->channels;
index b1893f3f88f1c6b6fdb2e8e42ad565ba23e18fd1..3ad1458bfeb0fc32afe790b2aa0f3e36961b7b45 100644 (file)
@@ -921,6 +921,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
        writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
                        sport->port.membase + UARTPFIFO);
 
+       /* explicitly clear RDRF */
+       readb(sport->port.membase + UARTSR1);
+
        /* flush Tx and Rx FIFO */
        writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
                        sport->port.membase + UARTCFIFO);
@@ -1076,6 +1079,8 @@ static int lpuart_startup(struct uart_port *port)
        sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) &
                UARTPFIFO_FIFOSIZE_MASK) + 1);
 
+       sport->port.fifosize = sport->txfifo_size;
+
        sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
                UARTPFIFO_FIFOSIZE_MASK) + 1);
 
index af821a9087204ec654f637a617513ba06d68a0ef..cf08876922f1446e55a2d8ca79bf87e0d4e24fed 100644 (file)
@@ -963,6 +963,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
                        free_irq(ourport->tx_irq, ourport);
                tx_enabled(port) = 0;
                ourport->tx_claimed = 0;
+               ourport->tx_mode = 0;
        }
 
        if (ourport->rx_claimed) {
index a7865c4b04980898b49317386ad6138aab051bc5..0827d7c965276382418f0a602ec5c1412c023142 100644 (file)
@@ -387,6 +387,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
                status = PORT_PLC;
                port_change_bit = "link state";
                break;
+       case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
+               status = PORT_CEC;
+               port_change_bit = "config error";
+               break;
        default:
                /* Should never happen */
                return;
@@ -588,6 +592,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                        status |= USB_PORT_STAT_C_LINK_STATE << 16;
                if ((raw_port_status & PORT_WRC))
                        status |= USB_PORT_STAT_C_BH_RESET << 16;
+               if ((raw_port_status & PORT_CEC))
+                       status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
        }
 
        if (hcd->speed != HCD_USB3) {
@@ -1005,6 +1011,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                case USB_PORT_FEAT_C_OVER_CURRENT:
                case USB_PORT_FEAT_C_ENABLE:
                case USB_PORT_FEAT_C_PORT_LINK_STATE:
+               case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
                        xhci_clear_port_change_bit(xhci, wValue, wIndex,
                                        port_array[wIndex], temp);
                        break;
@@ -1069,7 +1076,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
         */
        status = bus_state->resuming_ports;
 
-       mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
+       mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
 
        spin_lock_irqsave(&xhci->lock, flags);
        /* For each port, did anything change?  If so, set that bit in buf. */
index fd53c9ebd662a5fb4593c99ce5dd7c2552ad83c5..2af32e26fafc3727279fe656fbbcaf158736371d 100644 (file)
@@ -115,6 +115,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                xhci->quirks |= XHCI_LPM_SUPPORT;
                xhci->quirks |= XHCI_INTEL_HOST;
+               xhci->quirks |= XHCI_AVOID_BEI;
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
                        pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
@@ -130,7 +131,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                 * PPT chipsets.
                 */
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
-               xhci->quirks |= XHCI_AVOID_BEI;
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
                pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
index f32c292cc8689d81bfce947f7c587603e95c09be..3fc4fe7702533b785bc22ead9fe17939e1f365f8 100644 (file)
@@ -1203,7 +1203,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
 
        if (udc->driver) {
                dev_err(udc->isp->dev, "UDC already has a gadget driver\n");
-               spin_unlock(&udc->lock);
+               spin_unlock_irqrestore(&udc->lock, flags);
                return -EBUSY;
        }
 
index 3086dec0ef53bbd5d5d3d21087b91469983492fb..8eb68a31cab6c4021617ca555cd58b086872c112 100644 (file)
@@ -604,6 +604,7 @@ static const struct usb_device_id id_table_combined[] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
        /*
         * ELV devices:
         */
@@ -1883,8 +1884,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
 {
        struct usb_device *udev = serial->dev;
 
-       if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
-           (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
+       if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems"))
+               return ftdi_jtag_probe(serial);
+
+       if (udev->product &&
+               (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+                !strcmp(udev->product, "SNAP Connect E10")))
                return ftdi_jtag_probe(serial);
 
        return 0;
index 56b1b55c4751696b2e89633ea90bfe1c2940c436..4e4f46f3c89c025670d42860756f39b2bb62ae24 100644 (file)
  */
 #define FTDI_NT_ORIONLXM_PID   0x7c90  /* OrionLXm Substation Automation Platform */
 
+/*
+ * Synapse Wireless product ids (FTDI_VID)
+ * http://www.synapse-wireless.com
+ */
+#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
+
 
 /********************************/
 /** third-party VID/PID combos **/
index dd97d8b572c336e03c7c4c282fbe7e38c44646a4..4f7e072e4e001e9f7c439114f13b52a77d3250d6 100644 (file)
@@ -61,6 +61,7 @@ struct keyspan_pda_private {
 /* For Xircom PGSDB9 and older Entrega version of the same device */
 #define XIRCOM_VENDOR_ID               0x085a
 #define XIRCOM_FAKE_ID                 0x8027
+#define XIRCOM_FAKE_ID_2               0x8025 /* "PGMFHUB" serial */
 #define ENTREGA_VENDOR_ID              0x1645
 #define ENTREGA_FAKE_ID                        0x8093
 
@@ -70,6 +71,7 @@ static const struct usb_device_id id_table_combined[] = {
 #endif
 #ifdef XIRCOM
        { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
+       { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
        { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
 #endif
        { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
@@ -93,6 +95,7 @@ static const struct usb_device_id id_table_fake[] = {
 #ifdef XIRCOM
 static const struct usb_device_id id_table_fake_xircom[] = {
        { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
+       { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
        { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
        { }
 };
index c8def68d9e4cf30c391fabc36981f96b4cb9d72a..0deaa4f971f5ff1fdfde6d063314a45bd99b9707 100644 (file)
 #define PDC_WDT_MIN_TIMEOUT            1
 #define PDC_WDT_DEF_TIMEOUT            64
 
-static int heartbeat;
+static int heartbeat = PDC_WDT_DEF_TIMEOUT;
 module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
-       "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds "
+       "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
 
 static bool nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, bool, 0);
@@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev)
        pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
        pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK;
        pdc_wdt->wdt_dev.parent = &pdev->dev;
+       watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
 
        ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
        if (ret < 0) {
@@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev)
        watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);
 
        platform_set_drvdata(pdev, pdc_wdt);
-       watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
 
        ret = watchdog_register_device(&pdc_wdt->wdt_dev);
        if (ret)
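Editor's note on the imgpdc watchdog hunks above: watchdog_set_drvdata() is moved earlier in probe and the heartbeat parameter gets a non-zero default. The general hazard this kind of reordering avoids is that driver data must be attached before any code path that fetches it back via watchdog_get_drvdata(); the sketch below shows that failure mode with made-up names, not the driver's actual call chain:

#include <stdio.h>

struct wdt_dev {
        void *drvdata;
        int (*set_timeout)(struct wdt_dev *wdd, unsigned int t);
        unsigned int timeout;
};

struct my_wdt { unsigned int hw_timeout; };

static int my_set_timeout(struct wdt_dev *wdd, unsigned int t)
{
        struct my_wdt *mw = wdd->drvdata;   /* crashes if drvdata is not set yet */

        mw->hw_timeout = t;
        wdd->timeout = t;
        return 0;
}

static int init_timeout(struct wdt_dev *wdd, unsigned int t)
{
        return wdd->set_timeout(wdd, t);    /* may invoke the op */
}

int main(void)
{
        struct my_wdt mw = { 0 };
        struct wdt_dev wdd = { .drvdata = NULL, .set_timeout = my_set_timeout };

        wdd.drvdata = &mw;                  /* set BEFORE init_timeout(), as in the fix */
        init_timeout(&wdd, 64);
        printf("hw timeout = %u\n", mw.hw_timeout);
        return 0;
}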
index a87f6df6e85f32993db3907d66f3066c8820a91d..938b987de551bdea7615a701007d0125f9a10d8b 100644 (file)
@@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev)
        u32 reg;
        struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
        void __iomem *wdt_base = mtk_wdt->wdt_base;
-       u32 ret;
+       int ret;
 
        ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
        if (ret < 0)
index b812462083fcaf8d32c215327d28b7c4b7e6e4da..94d96809e686b9ffd1f83c76681c8c3ff0a7cf92 100644 (file)
@@ -55,6 +55,23 @@ config XEN_BALLOON_MEMORY_HOTPLUG
 
          In that case step 3 should be omitted.
 
+config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+       int "Hotplugged memory limit (in GiB) for a PV guest"
+       default 512 if X86_64
+       default 4 if X86_32
+       range 0 64 if X86_32
+       depends on XEN_HAVE_PVMMU
+       depends on XEN_BALLOON_MEMORY_HOTPLUG
+       help
+         Maximum amount of memory (in GiB) that a PV guest can be
+         expanded to when using memory hotplug.
+
+         A PV guest can have more memory than this limit if it is
+         started with a larger maximum.
+
+         This value is used to allocate enough space in internal
+         tables needed for physical memory administration.
+
 config XEN_SCRUB_PAGES
        bool "Scrub pages before returning them to system"
        depends on XEN_BALLOON
index 0b52d92cb2e52d03899dd410d8a58e7640c8d8d5..fd933695f2328f29c2493ee751f22230ec68cbb1 100644 (file)
@@ -229,6 +229,29 @@ static enum bp_state reserve_additional_memory(long credit)
        balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
        nid = memory_add_physaddr_to_nid(hotplug_start_paddr);
 
+#ifdef CONFIG_XEN_HAVE_PVMMU
+        /*
+         * add_memory() will build page tables for the new memory so
+         * the p2m must contain invalid entries so the correct
+         * non-present PTEs will be written.
+         *
+         * If a failure occurs, the original (identity) p2m entries
+         * are not restored since this region is now known not to
+         * conflict with any devices.
+         */ 
+       if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+               unsigned long pfn, i;
+
+               pfn = PFN_DOWN(hotplug_start_paddr);
+               for (i = 0; i < balloon_hotplug; i++) {
+                       if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
+                               pr_warn("set_phys_to_machine() failed, no memory added\n");
+                               return BP_ECANCELED;
+                       }
+                }
+       }
+#endif
+
        rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
 
        if (rc) {
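Editor's note on the balloon.c hunk above: the p2m entries covering the hotplugged range are pre-populated as invalid, and the whole operation is cancelled if any entry cannot be written. A runnable toy of that reserve-or-cancel loop, with a plain array standing in for the p2m:

#include <stdio.h>
#include <stdbool.h>

#define INVALID_ENTRY (~0UL)
#define TABLE_SIZE 16UL

static unsigned long p2m[TABLE_SIZE];

static bool set_entry(unsigned long pfn, unsigned long val)
{
        if (pfn >= TABLE_SIZE)      /* out of range: mimics a failed update */
                return false;
        p2m[pfn] = val;
        return true;
}

static int reserve_range(unsigned long start_pfn, unsigned long count)
{
        for (unsigned long i = 0; i < count; i++) {
                if (!set_entry(start_pfn + i, INVALID_ENTRY)) {
                        fprintf(stderr, "entry %lu failed, cancelling\n",
                                start_pfn + i);
                        return -1;          /* analogous to BP_ECANCELED */
                }
        }
        return 0;
}

int main(void)
{
        printf("in-range reservation: %d\n", reserve_range(4, 8));
        printf("out-of-range reservation: %d\n", reserve_range(12, 8));
        return 0;
}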
index d2468bf95669850520a2a0dc852b1185c5234c7b..a91795e01a7ff0c0e85abf1bdf69f3d1d828b231 100644 (file)
@@ -699,8 +699,10 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
        boff = tmp % bsize;
        if (boff) {
                bh = affs_bread_ino(inode, bidx, 0);
-               if (IS_ERR(bh))
-                       return PTR_ERR(bh);
+               if (IS_ERR(bh)) {
+                       written = PTR_ERR(bh);
+                       goto err_first_bh;
+               }
                tmp = min(bsize - boff, to - from);
                BUG_ON(boff + tmp > bsize || tmp > bsize);
                memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
@@ -712,14 +714,16 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
                bidx++;
        } else if (bidx) {
                bh = affs_bread_ino(inode, bidx - 1, 0);
-               if (IS_ERR(bh))
-                       return PTR_ERR(bh);
+               if (IS_ERR(bh)) {
+                       written = PTR_ERR(bh);
+                       goto err_first_bh;
+               }
        }
        while (from + bsize <= to) {
                prev_bh = bh;
                bh = affs_getemptyblk_ino(inode, bidx);
                if (IS_ERR(bh))
-                       goto out;
+                       goto err_bh;
                memcpy(AFFS_DATA(bh), data + from, bsize);
                if (buffer_new(bh)) {
                        AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
@@ -751,7 +755,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
                prev_bh = bh;
                bh = affs_bread_ino(inode, bidx, 1);
                if (IS_ERR(bh))
-                       goto out;
+                       goto err_bh;
                tmp = min(bsize, to - from);
                BUG_ON(tmp > bsize);
                memcpy(AFFS_DATA(bh), data + from, tmp);
@@ -790,12 +794,13 @@ done:
        if (tmp > inode->i_size)
                inode->i_size = AFFS_I(inode)->mmu_private = tmp;
 
+err_first_bh:
        unlock_page(page);
        page_cache_release(page);
 
        return written;
 
-out:
+err_bh:
        bh = prev_bh;
        if (!written)
                written = PTR_ERR(bh);
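Editor's note on the AFFS hunks above: early error returns are rerouted through labels so that the page locked and referenced earlier in the function is always released. A generic sketch of that cleanup-label shape; the resource names are illustrative:

#include <stdio.h>

struct page { int locked; int refs; };

static void unlock_page(struct page *p) { p->locked = 0; }
static void put_page(struct page *p)    { p->refs--; }

static long write_chunk(struct page *p, int fail_early)
{
        long written = 0;

        p->locked = 1;
        p->refs = 1;

        if (fail_early) {
                written = -5;           /* some error code */
                goto err_out;           /* an early return here would leak the page */
        }

        written = 42;                   /* pretend we wrote 42 bytes */

err_out:
        unlock_page(p);
        put_page(p);
        return written;
}

int main(void)
{
        struct page p = {0, 0};

        printf("ok path: %ld (locked=%d refs=%d)\n",
               write_chunk(&p, 0), p.locked, p.refs);
        printf("error path: %ld (locked=%d refs=%d)\n",
               write_chunk(&p, 1), p.locked, p.refs);
        return 0;
}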
index 4ac7445e6ec70516848e942f54a6846a8541113b..aa0dc2573374184597b9e449fad6467f924f0f45 100644 (file)
@@ -1,6 +1,9 @@
 /*
  *   fs/cifs/cifsencrypt.c
  *
+ *   Encryption and hashing operations relating to NTLM, NTLMv2.  See MS-NLMP
+ *   for more detailed information
+ *
  *   Copyright (C) International Business Machines  Corp., 2005,2013
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
@@ -515,7 +518,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
                                 __func__);
                        return rc;
                }
-       } else if (ses->serverName) {
+       } else {
+               /* We use ses->serverName if no domain name available */
                len = strlen(ses->serverName);
 
                server = kmalloc(2 + (len * 2), GFP_KERNEL);
index d3aa999ab78520fcd4819f99548247e231df591b..480cf9c81d505b8351dd76eee0f012110c3f2b9c 100644 (file)
@@ -1599,6 +1599,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                pr_warn("CIFS: username too long\n");
                                goto cifs_parse_mount_err;
                        }
+
+                       kfree(vol->username);
                        vol->username = kstrdup(string, GFP_KERNEL);
                        if (!vol->username)
                                goto cifs_parse_mount_err;
@@ -1700,6 +1702,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                goto cifs_parse_mount_err;
                        }
 
+                       kfree(vol->domainname);
                        vol->domainname = kstrdup(string, GFP_KERNEL);
                        if (!vol->domainname) {
                                pr_warn("CIFS: no memory for domainname\n");
@@ -1731,6 +1734,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        }
 
                         if (strncasecmp(string, "default", 7) != 0) {
+                               kfree(vol->iocharset);
                                vol->iocharset = kstrdup(string,
                                                         GFP_KERNEL);
                                if (!vol->iocharset) {
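Editor's note on the connect.c hunks above: the previously allocated string is freed before kstrdup()ing a new one, so a mount option that appears twice does not leak the first copy. A small userspace analogue of the pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct options { char *username; };

static int set_username(struct options *opt, const char *value)
{
        free(opt->username);            /* drop any earlier value; free(NULL) is fine */
        opt->username = strdup(value);
        return opt->username ? 0 : -1;
}

int main(void)
{
        struct options opt = { NULL };

        set_username(&opt, "first");
        set_username(&opt, "second");   /* without the free() this would leak "first" */
        printf("username=%s\n", opt.username);
        free(opt.username);
        return 0;
}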
@@ -2913,8 +2917,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
                 * calling name ends in null (byte 16) from old smb
                 * convention.
                 */
-               if (server->workstation_RFC1001_name &&
-                   server->workstation_RFC1001_name[0] != 0)
+               if (server->workstation_RFC1001_name[0] != 0)
                        rfc1002mangle(ses_init_buf->trailer.
                                      session_req.calling_name,
                                      server->workstation_RFC1001_name,
@@ -3692,6 +3695,12 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
 #endif /* CIFS_WEAK_PW_HASH */
                rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
                                        bcc_ptr, nls_codepage);
+               if (rc) {
+                       cifs_dbg(FYI, "%s Can't generate NTLM rsp. Error: %d\n",
+                                __func__, rc);
+                       cifs_buf_release(smb_buffer);
+                       return rc;
+               }
 
                bcc_ptr += CIFS_AUTH_RESP_SIZE;
                if (ses->capabilities & CAP_UNICODE) {
index a94b3e67318283dd54d61fc595ecb2037ba3a515..ca30c391a894a0e9df8eac6ed1ede194c89f1885 100644 (file)
@@ -1823,6 +1823,7 @@ refind_writable:
                        cifsFileInfo_put(inv_file);
                        spin_lock(&cifs_file_list_lock);
                        ++refind;
+                       inv_file = NULL;
                        goto refind_writable;
                }
        }
index 2d4f37235ed0fa360782ae237c89fccccbf8b719..3e126d7bb2ea5bec97c9d6e02973a49886261580 100644 (file)
@@ -771,6 +771,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
                                cifs_buf_release(srchinf->ntwrk_buf_start);
                        }
                        kfree(srchinf);
+                       if (rc)
+                               goto cgii_exit;
        } else
                goto cgii_exit;
 
index 689f035915cf70f075d71fca5e281ec009c5420a..22dfdf17d06547f3d1b3abbc302bb03abf1b047b 100644 (file)
@@ -322,7 +322,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
 
        /* return pointer to beginning of data area, ie offset from SMB start */
        if ((*off != 0) && (*len != 0))
-               return hdr->ProtocolId + *off;
+               return (char *)(&hdr->ProtocolId[0]) + *off;
        else
                return NULL;
 }
index 96b5d40a2ece611b27ed19668cc4b7b665605113..eab05e1aa587424863d6914eb351da9fdcf17437 100644 (file)
@@ -684,7 +684,8 @@ smb2_clone_range(const unsigned int xid,
 
                        /* No need to change MaxChunks since already set to 1 */
                        chunk_sizes_updated = true;
-               }
+               } else
+                       goto cchunk_out;
        }
 
 cchunk_out:
index 3417340bf89e677fe0c46bf98cf922dd39d29a3a..65cd7a84c8bc3206033a917fe9d98fc939cbe1af 100644 (file)
@@ -1218,7 +1218,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        struct smb2_ioctl_req *req;
        struct smb2_ioctl_rsp *rsp;
        struct TCP_Server_Info *server;
-       struct cifs_ses *ses = tcon->ses;
+       struct cifs_ses *ses;
        struct kvec iov[2];
        int resp_buftype;
        int num_iovecs;
@@ -1233,6 +1233,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        if (plen)
                *plen = 0;
 
+       if (tcon)
+               ses = tcon->ses;
+       else
+               return -EIO;
+
        if (ses && (ses->server))
                server = ses->server;
        else
@@ -1296,14 +1301,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
 
        if ((rc != 0) && (rc != -EINVAL)) {
-               if (tcon)
-                       cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
+               cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
                goto ioctl_exit;
        } else if (rc == -EINVAL) {
                if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
                    (opcode != FSCTL_SRV_COPYCHUNK)) {
-                       if (tcon)
-                               cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
+                       cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
                        goto ioctl_exit;
                }
        }
@@ -1629,7 +1632,7 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
 
        rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
 
-       if ((rc != 0) && tcon)
+       if (rc != 0)
                cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
 
        free_rsp_buf(resp_buftype, iov[0].iov_base);
@@ -2114,7 +2117,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        struct kvec iov[2];
        int rc = 0;
        int len;
-       int resp_buftype;
+       int resp_buftype = CIFS_NO_BUFFER;
        unsigned char *bufptr;
        struct TCP_Server_Info *server;
        struct cifs_ses *ses = tcon->ses;
index e907052eeadb69f683df3c7d8838106c6e36905b..32a8bbd7a9ad1121f9b100da08f80500736bcebf 100644 (file)
@@ -53,6 +53,18 @@ struct wb_writeback_work {
        struct completion *done;        /* set if the caller waits */
 };
 
+/*
+ * If an inode is constantly having its pages dirtied, but then the
+ * updates stop dirtytime_expire_interval seconds in the past, it's
+ * possible for the worst case time between when an inode has its
+ * timestamps updated and when they finally get written out to be two
+ * dirtytime_expire_intervals.  We set the default to 12 hours (in
+ * seconds), which means most of the time inodes will have their
+ * timestamps written to disk after 12 hours, but in the worst case a
+ * few inodes might not have their timestamps updated for 24 hours.
+ */
+unsigned int dirtytime_expire_interval = 12 * 60 * 60;
+
 /**
  * writeback_in_progress - determine whether there is writeback in progress
  * @bdi: the device's backing_dev_info structure.
@@ -275,8 +287,8 @@ static int move_expired_inodes(struct list_head *delaying_queue,
 
        if ((flags & EXPIRE_DIRTY_ATIME) == 0)
                older_than_this = work->older_than_this;
-       else if ((work->reason == WB_REASON_SYNC) == 0) {
-               expire_time = jiffies - (HZ * 86400);
+       else if (!work->for_sync) {
+               expire_time = jiffies - (dirtytime_expire_interval * HZ);
                older_than_this = &expire_time;
        }
        while (!list_empty(delaying_queue)) {
@@ -458,6 +470,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
                 */
                redirty_tail(inode, wb);
        } else if (inode->i_state & I_DIRTY_TIME) {
+               inode->dirtied_when = jiffies;
                list_move(&inode->i_wb_list, &wb->b_dirty_time);
        } else {
                /* The inode is clean. Remove from writeback lists. */
@@ -505,12 +518,17 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
        spin_lock(&inode->i_lock);
 
        dirty = inode->i_state & I_DIRTY;
-       if (((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) &&
-            (inode->i_state & I_DIRTY_TIME)) ||
-           (inode->i_state & I_DIRTY_TIME_EXPIRED)) {
-               dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
-               trace_writeback_lazytime(inode);
-       }
+       if (inode->i_state & I_DIRTY_TIME) {
+               if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
+                   unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
+                   unlikely(time_after(jiffies,
+                                       (inode->dirtied_time_when +
+                                        dirtytime_expire_interval * HZ)))) {
+                       dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
+                       trace_writeback_lazytime(inode);
+               }
+       } else
+               inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
        inode->i_state &= ~dirty;
 
        /*
@@ -1131,6 +1149,56 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
        rcu_read_unlock();
 }
 
+/*
+ * Wake up bdi's periodically to make sure dirtytime inodes get
+ * written back periodically.  We deliberately do *not* check the
+ * b_dirtytime list in wb_has_dirty_io(), since this would cause the
+ * kernel to be constantly waking up once there are any dirtytime
+ * inodes on the system.  So instead we define a separate delayed work
+ * function which gets called much more rarely.  (By default, only
+ * once every 12 hours.)
+ *
+ * If there is any other write activity going on in the file system,
+ * this function won't be necessary.  But if the only thing that has
+ * happened on the file system is a dirtytime inode caused by an atime
+ * update, we need this infrastructure below to make sure that inode
+ * eventually gets pushed out to disk.
+ */
+static void wakeup_dirtytime_writeback(struct work_struct *w);
+static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
+
+static void wakeup_dirtytime_writeback(struct work_struct *w)
+{
+       struct backing_dev_info *bdi;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
+               if (list_empty(&bdi->wb.b_dirty_time))
+                       continue;
+               bdi_wakeup_thread(bdi);
+       }
+       rcu_read_unlock();
+       schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+}
+
+static int __init start_dirtytime_writeback(void)
+{
+       schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+       return 0;
+}
+__initcall(start_dirtytime_writeback);
+
+int dirtytime_interval_handler(struct ctl_table *table, int write,
+                              void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       int ret;
+
+       ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+       if (ret == 0 && write)
+               mod_delayed_work(system_wq, &dirtytime_work, 0);
+       return ret;
+}
+
 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
 {
        if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
@@ -1269,8 +1337,13 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                        }
 
                        inode->dirtied_when = jiffies;
-                       list_move(&inode->i_wb_list, dirtytime ?
-                                 &bdi->wb.b_dirty_time : &bdi->wb.b_dirty);
+                       if (dirtytime)
+                               inode->dirtied_time_when = jiffies;
+                       if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES))
+                               list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
+                       else
+                               list_move(&inode->i_wb_list,
+                                         &bdi->wb.b_dirty_time);
                        spin_unlock(&bdi->wb.list_lock);
                        trace_writeback_dirty_inode_enqueue(inode);
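Editor's note on the fs-writeback.c hunks above: dirtied_time_when and an expiry test are added so that inodes that are only timestamp-dirty get written out once dirtytime_expire_interval has elapsed. The sketch below models just that expiry comparison, treating jiffies as a plain counter; the HZ value is illustrative:

#include <stdio.h>
#include <stdbool.h>

#define HZ 100
static unsigned int dirtytime_expire_interval = 12 * 60 * 60;   /* seconds */

/* Same semantics as the kernel's time_after(a, b): true if a is after b. */
static bool time_after(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;
}

static bool timestamps_expired(unsigned long now, unsigned long dirtied_time_when)
{
        return time_after(now, dirtied_time_when +
                               (unsigned long)dirtytime_expire_interval * HZ);
}

int main(void)
{
        unsigned long dirtied = 1000;
        unsigned long eleven_hours = dirtied + 11UL * 60 * 60 * HZ;
        unsigned long thirteen_hours = dirtied + 13UL * 60 * 60 * HZ;

        printf("after 11h: %d, after 13h: %d\n",
               timestamps_expired(eleven_hours, dirtied),
               timestamps_expired(thirteen_hours, dirtied));
        return 0;
}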
 
index 6e560d56094b2ccaf7091ec784a6a039af214f43..754fdf8c6356388f16bbdc285240394b350bd90e 100644 (file)
@@ -131,13 +131,16 @@ skip:
        hfs_bnode_write(node, entry, data_off + key_len, entry_len);
        hfs_bnode_dump(node);
 
-       if (new_node) {
-               /* update parent key if we inserted a key
-                * at the start of the first node
-                */
-               if (!rec && new_node != node)
-                       hfs_brec_update_parent(fd);
+       /*
+        * update parent key if we inserted a key
+        * at the start of the node and it is not the new node
+        */
+       if (!rec && new_node != node) {
+               hfs_bnode_read_key(node, fd->search_key, data_off + size);
+               hfs_brec_update_parent(fd);
+       }
 
+       if (new_node) {
                hfs_bnode_put(fd->bnode);
                if (!new_node->parent) {
                        hfs_btree_inc_height(tree);
@@ -168,9 +171,6 @@ skip:
                goto again;
        }
 
-       if (!rec)
-               hfs_brec_update_parent(fd);
-
        return 0;
 }
 
@@ -370,6 +370,8 @@ again:
        if (IS_ERR(parent))
                return PTR_ERR(parent);
        __hfs_brec_find(parent, fd, hfs_find_rec_by_key);
+       if (fd->record < 0)
+               return -ENOENT;
        hfs_bnode_dump(parent);
        rec = fd->record;
 
index 528fedfda15e6432bd69b80c7bd8faceb7d1351d..40bc384728c0e07b637dccfa755fe49b5751bf86 100644 (file)
@@ -1388,9 +1388,8 @@ any_leases_conflict(struct inode *inode, struct file_lock *breaker)
 int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 {
        int error = 0;
-       struct file_lock *new_fl;
        struct file_lock_context *ctx = inode->i_flctx;
-       struct file_lock *fl;
+       struct file_lock *new_fl, *fl, *tmp;
        unsigned long break_time;
        int want_write = (mode & O_ACCMODE) != O_RDONLY;
        LIST_HEAD(dispose);
@@ -1420,7 +1419,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
                        break_time++;   /* so that 0 means no break time */
        }
 
-       list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+       list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
                if (!leases_conflict(fl, new_fl))
                        continue;
                if (want_write) {
index cdbc78c7254218c80213877e90c08ec107aea223..03d647bf195d78bb3d6611553c9ad3e6fa4385a2 100644 (file)
@@ -137,7 +137,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
        seg->offset = iomap.offset;
        seg->length = iomap.length;
 
-       dprintk("GET: %lld:%lld %d\n", bex->foff, bex->len, bex->es);
+       dprintk("GET: 0x%llx:0x%llx %d\n", bex->foff, bex->len, bex->es);
        return 0;
 
 out_error:
index 9da89fddab338fcebbeb51fc0ec71534e0defc5a..9aa2796da90d9169488a625d5f80e90010971ff4 100644 (file)
@@ -122,19 +122,19 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
 
                p = xdr_decode_hyper(p, &bex.foff);
                if (bex.foff & (block_size - 1)) {
-                       dprintk("%s: unaligned offset %lld\n",
+                       dprintk("%s: unaligned offset 0x%llx\n",
                                __func__, bex.foff);
                        goto fail;
                }
                p = xdr_decode_hyper(p, &bex.len);
                if (bex.len & (block_size - 1)) {
-                       dprintk("%s: unaligned length %lld\n",
+                       dprintk("%s: unaligned length 0x%llx\n",
                                __func__, bex.foff);
                        goto fail;
                }
                p = xdr_decode_hyper(p, &bex.soff);
                if (bex.soff & (block_size - 1)) {
-                       dprintk("%s: unaligned disk offset %lld\n",
+                       dprintk("%s: unaligned disk offset 0x%llx\n",
                                __func__, bex.soff);
                        goto fail;
                }
index 1028a062954357c06005dc7b0d6f61a10ea6f418..6904213a436368e47628af85701a3beb68e0550b 100644 (file)
@@ -118,7 +118,7 @@ void nfsd4_setup_layout_type(struct svc_export *exp)
 {
        struct super_block *sb = exp->ex_path.mnt->mnt_sb;
 
-       if (exp->ex_flags & NFSEXP_NOPNFS)
+       if (!(exp->ex_flags & NFSEXP_PNFS))
                return;
 
        if (sb->s_export_op->get_uuid &&
@@ -440,15 +440,14 @@ nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
                        list_move_tail(&lp->lo_perstate, reaplist);
                        return;
                }
-               end = seg->offset;
+               lo->offset = layout_end(seg);
        } else {
                /* retain the whole layout segment on a split. */
                if (layout_end(seg) < end) {
                        dprintk("%s: split not supported\n", __func__);
                        return;
                }
-
-               lo->offset = layout_end(seg);
+               end = seg->offset;
        }
 
        layout_update_len(lo, end);
@@ -513,6 +512,9 @@ nfsd4_return_client_layouts(struct svc_rqst *rqstp,
 
        spin_lock(&clp->cl_lock);
        list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
+               if (ls->ls_layout_type != lrp->lr_layout_type)
+                       continue;
+
                if (lrp->lr_return_type == RETURN_FSID &&
                    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
                                   &cstate->current_fh.fh_handle))
@@ -587,6 +589,8 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
 
        rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));
 
+       trace_layout_recall_fail(&ls->ls_stid.sc_stateid);
+
        printk(KERN_WARNING
                "nfsd: client %s failed to respond to layout recall. "
                "  Fencing..\n", addr_str);
index d30bea8d0277ab3bfa45e7d66c38ed410026f585..92b9d97aff4f1adfe24ed607d11661196a500ffc 100644 (file)
@@ -1237,8 +1237,8 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
                nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp);
 
        gdp->gd_notify_types &= ops->notify_types;
-       exp_put(exp);
 out:
+       exp_put(exp);
        return nfserr;
 }
 
index d2f2c37dc2dbd2649399fe2ddad4032a5025d337..8ba1d888f1e624d672453bd1eea20c40e054f746 100644 (file)
@@ -3221,7 +3221,7 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
        } else
                nfs4_free_openowner(&oo->oo_owner);
        spin_unlock(&clp->cl_lock);
-       return oo;
+       return ret;
 }
 
 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
@@ -5062,7 +5062,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
        } else
                nfs4_free_lockowner(&lo->lo_owner);
        spin_unlock(&clp->cl_lock);
-       return lo;
+       return ret;
 }
 
 static void
index df5e66caf100ca303005018932ccc8edadab4089..5fb7e78169a6b27a1a5a4d5e9ec354c9e32f6c43 100644 (file)
@@ -1562,7 +1562,11 @@ nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
        p = xdr_decode_hyper(p, &lgp->lg_seg.offset);
        p = xdr_decode_hyper(p, &lgp->lg_seg.length);
        p = xdr_decode_hyper(p, &lgp->lg_minlength);
-       nfsd4_decode_stateid(argp, &lgp->lg_sid);
+
+       status = nfsd4_decode_stateid(argp, &lgp->lg_sid);
+       if (status)
+               return status;
+
        READ_BUF(4);
        lgp->lg_maxcount = be32_to_cpup(p++);
 
@@ -1580,7 +1584,11 @@ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
        p = xdr_decode_hyper(p, &lcp->lc_seg.offset);
        p = xdr_decode_hyper(p, &lcp->lc_seg.length);
        lcp->lc_reclaim = be32_to_cpup(p++);
-       nfsd4_decode_stateid(argp, &lcp->lc_sid);
+
+       status = nfsd4_decode_stateid(argp, &lcp->lc_sid);
+       if (status)
+               return status;
+
        READ_BUF(4);
        lcp->lc_newoffset = be32_to_cpup(p++);
        if (lcp->lc_newoffset) {
@@ -1628,7 +1636,11 @@ nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
                READ_BUF(16);
                p = xdr_decode_hyper(p, &lrp->lr_seg.offset);
                p = xdr_decode_hyper(p, &lrp->lr_seg.length);
-               nfsd4_decode_stateid(argp, &lrp->lr_sid);
+
+               status = nfsd4_decode_stateid(argp, &lrp->lr_sid);
+               if (status)
+                       return status;
+
                READ_BUF(4);
                lrp->lrf_body_len = be32_to_cpup(p++);
                if (lrp->lrf_body_len > 0) {
@@ -4123,7 +4135,7 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
                return nfserr_resource;
        *p++ = cpu_to_be32(lrp->lrs_present);
        if (lrp->lrs_present)
-               nfsd4_encode_stateid(xdr, &lrp->lr_sid);
+               return nfsd4_encode_stateid(xdr, &lrp->lr_sid);
        return nfs_ok;
 }
 #endif /* CONFIG_NFSD_PNFS */
index 83a9694ec485b0593e3de847464243640fcd7186..46ec934f5dee8c529020558bed4cd820956734f1 100644 (file)
@@ -165,13 +165,17 @@ int nfsd_reply_cache_init(void)
 {
        unsigned int hashsize;
        unsigned int i;
+       int status = 0;
 
        max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&num_drc_entries, 0);
        hashsize = nfsd_hashsize(max_drc_entries);
        maskbits = ilog2(hashsize);
 
-       register_shrinker(&nfsd_reply_cache_shrinker);
+       status = register_shrinker(&nfsd_reply_cache_shrinker);
+       if (status)
+               return status;
+
        drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
                                        0, 0, NULL);
        if (!drc_slab)
index b4d71b5e1ff23a2d3f3ec5c481937edd571ead4d..f4131e8ead74965a73272949b3a9eae8fa08b5c7 100644 (file)
@@ -604,6 +604,7 @@ struct inode {
        struct mutex            i_mutex;
 
        unsigned long           dirtied_when;   /* jiffies of first dirtying */
+       unsigned long           dirtied_time_when;
 
        struct hlist_node       i_hash;
        struct list_head        i_wb_list;      /* backing dev IO list */
index 781974afff9f14e576a7912039a5fb68009cdb25..ffbc034c88104d251c3891f4513c12a62818c5dd 100644 (file)
 #define GICR_PROPBASER_WaWb            (5U << 7)
 #define GICR_PROPBASER_RaWaWt          (6U << 7)
 #define GICR_PROPBASER_RaWaWb          (7U << 7)
+#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
 #define GICR_PROPBASER_IDBITS_MASK     (0x1f)
 
+#define GICR_PENDBASER_NonShareable    (0U << 10)
+#define GICR_PENDBASER_InnerShareable  (1U << 10)
+#define GICR_PENDBASER_OuterShareable  (2U << 10)
+#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
+#define GICR_PENDBASER_nCnB            (0U << 7)
+#define GICR_PENDBASER_nC              (1U << 7)
+#define GICR_PENDBASER_RaWt            (2U << 7)
+#define GICR_PENDBASER_RaWb            (3U << 7)
+#define GICR_PENDBASER_WaWt            (4U << 7)
+#define GICR_PENDBASER_WaWb            (5U << 7)
+#define GICR_PENDBASER_RaWaWt          (6U << 7)
+#define GICR_PENDBASER_RaWaWb          (7U << 7)
+#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
+
 /*
  * Re-Distributor registers, offsets from SGI_base
  */
 #define GITS_CBASER_WaWb               (5UL << 59)
 #define GITS_CBASER_RaWaWt             (6UL << 59)
 #define GITS_CBASER_RaWaWb             (7UL << 59)
+#define GITS_CBASER_CACHEABILITY_MASK  (7UL << 59)
 #define GITS_CBASER_NonShareable       (0UL << 10)
 #define GITS_CBASER_InnerShareable     (1UL << 10)
 #define GITS_CBASER_OuterShareable     (2UL << 10)
 #define GITS_BASER_WaWb                        (5UL << 59)
 #define GITS_BASER_RaWaWt              (6UL << 59)
 #define GITS_BASER_RaWaWb              (7UL << 59)
+#define GITS_BASER_CACHEABILITY_MASK   (7UL << 59)
 #define GITS_BASER_TYPE_SHIFT          (56)
 #define GITS_BASER_TYPE(r)             (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
 #define GITS_BASER_ENTRY_SIZE_SHIFT    (48)
index 7bf01d779b4532a5a8d168d0ff91fd08d5228e02..1ce79a7f1daa18868adfe14598c35206382f58a5 100644 (file)
@@ -4,5 +4,6 @@
 #include <linux/compiler.h>
 
 unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
+unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__;
 
 #endif /* _LCM_H */
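Editor's note on the lcm.h hunk above: the new lcm_not_zero() declaration suggests an LCM variant that does not collapse to zero when one argument is zero. A hedged sketch of semantics consistent with the name, not necessarily the kernel's exact implementation:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
        while (b) {
                unsigned long t = a % b;

                a = b;
                b = t;
        }
        return a;
}

static unsigned long lcm(unsigned long a, unsigned long b)
{
        if (a && b)
                return (a / gcd(a, b)) * b;
        return 0;
}

static unsigned long lcm_not_zero(unsigned long a, unsigned long b)
{
        unsigned long l = lcm(a, b);

        return l ? l : (b ? b : a);     /* fall back to the non-zero argument */
}

int main(void)
{
        printf("%lu %lu %lu\n",
               lcm_not_zero(4, 6),      /* 12 */
               lcm_not_zero(0, 512),    /* 512 */
               lcm_not_zero(512, 0));   /* 512 */
        return 0;
}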
index fc03efa64ffe58ae9aa81e77bf60e5d7b4acc6c7..6b08cc106c218dc06d60988663c4bcac3eaba5fe 100644 (file)
@@ -232,6 +232,7 @@ enum {
                                              * led */
        ATA_FLAG_NO_DIPM        = (1 << 23), /* host not happy with DIPM */
        ATA_FLAG_LOWTAG         = (1 << 24), /* host wants lowest available tag */
+       ATA_FLAG_SAS_HOST       = (1 << 25), /* SAS host */
 
        /* bits 24:31 of ap->flags are reserved for LLD specific flags */
 
index fb0390a1a498f29df135bbe7500c1dfcc00a27d7..ee7b1ce7a6f8f42280abcc3d1411f7bcdebb6ac2 100644 (file)
@@ -2999,6 +2999,9 @@ enum usb_irq_events {
 #define PALMAS_GPADC_TRIM15                                    0x0E
 #define PALMAS_GPADC_TRIM16                                    0x0F
 
+/* TPS659038 regen2_ctrl offset is different from palmas */
+#define TPS659038_REGEN2_CTRL                                  0x12
+
 /* TPS65917 Interrupt registers */
 
 /* Registers for function INTERRUPT */
index d4ad5b5a02bb478a422b349406efba00997bab76..045f709cb89b52c5e2381635b0bea10101030120 100644 (file)
@@ -316,7 +316,7 @@ struct regulator_desc {
  * @driver_data: private regulator data
  * @of_node: OpenFirmware node to parse for device tree bindings (may be
  *           NULL).
- * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is
+ * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is
  *          insufficient.
  * @ena_gpio_initialized: GPIO controlling regulator enable was properly
  *                        initialized, meaning that >= 0 is a valid gpio
index 6d77432e14ff971bffd4ca211dccb917768b2c8c..a419b65770d669c3a51c88a86a145abbcd3db339 100644 (file)
@@ -1625,11 +1625,11 @@ struct task_struct {
 
        /*
         * numa_faults_locality tracks if faults recorded during the last
-        * scan window were remote/local. The task scan period is adapted
-        * based on the locality of the faults with different weights
-        * depending on whether they were shared or private faults
+        * scan window were remote/local or failed to migrate. The task scan
+        * period is adapted based on the locality of the faults with different
+        * weights depending on whether they were shared or private faults
         */
-       unsigned long numa_faults_locality[2];
+       unsigned long numa_faults_locality[3];
 
        unsigned long numa_pages_migrated;
 #endif /* CONFIG_NUMA_BALANCING */
@@ -1719,6 +1719,7 @@ struct task_struct {
 #define TNF_NO_GROUP   0x02
 #define TNF_SHARED     0x04
 #define TNF_FAULT_LOCAL        0x08
+#define TNF_MIGRATE_FAIL 0x10
 
 #ifdef CONFIG_NUMA_BALANCING
 extern void task_numa_fault(int last_node, int node, int pages, int flags);
index c57d8ea0716cddea1419a37481dbd7704b230d65..59a7889e15db51c3b24bc771dbe5d0b1c965212d 100644 (file)
@@ -60,17 +60,17 @@ struct rpc_xprt;
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 void           rpc_register_sysctl(void);
 void           rpc_unregister_sysctl(void);
-int            sunrpc_debugfs_init(void);
+void           sunrpc_debugfs_init(void);
 void           sunrpc_debugfs_exit(void);
-int            rpc_clnt_debugfs_register(struct rpc_clnt *);
+void           rpc_clnt_debugfs_register(struct rpc_clnt *);
 void           rpc_clnt_debugfs_unregister(struct rpc_clnt *);
-int            rpc_xprt_debugfs_register(struct rpc_xprt *);
+void           rpc_xprt_debugfs_register(struct rpc_xprt *);
 void           rpc_xprt_debugfs_unregister(struct rpc_xprt *);
 #else
-static inline int
+static inline void
 sunrpc_debugfs_init(void)
 {
-       return 0;
+       return;
 }
 
 static inline void
@@ -79,10 +79,10 @@ sunrpc_debugfs_exit(void)
        return;
 }
 
-static inline int
+static inline void
 rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
 {
-       return 0;
+       return;
 }
 
 static inline void
@@ -91,10 +91,10 @@ rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt)
        return;
 }
 
-static inline int
+static inline void
 rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
 {
-       return 0;
+       return;
 }
 
 static inline void
index d9a4905e01d0c98b88e89c7db5c85bbf7d5be8a1..6e0ce8c7b8cb5a9fcb985a5a5078f82267d03092 100644 (file)
@@ -227,9 +227,23 @@ struct skb_data {  /* skb->cb is one of these */
        struct urb              *urb;
        struct usbnet           *dev;
        enum skb_state          state;
-       size_t                  length;
+       long                    length;
+       unsigned long           packets;
 };
 
+/* Drivers that set FLAG_MULTI_PACKET must call this in their
+ * tx_fixup method before returning an skb.
+ */
+static inline void
+usbnet_set_skb_tx_stats(struct sk_buff *skb,
+                       unsigned long packets, long bytes_delta)
+{
+       struct skb_data *entry = (struct skb_data *) skb->cb;
+
+       entry->packets = packets;
+       entry->length = bytes_delta;
+}
+
 extern int usbnet_open(struct net_device *net);
 extern int usbnet_stop(struct net_device *net);
 extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb,
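Editor's note on the usbnet.h hunk above: per-skb packet and byte bookkeeping is added so FLAG_MULTI_PACKET drivers can report more than one packet for a single submitted buffer. A toy model of why that matters; the names and the accounting here are illustrative, not usbnet's internals:

#include <stdio.h>

struct skb_data { long length; unsigned long packets; };

static void set_tx_stats(struct skb_data *entry,
                         unsigned long packets, long bytes_delta)
{
        entry->packets = packets;       /* logical frames carried by this buffer */
        entry->length = bytes_delta;    /* byte adjustment versus the raw buffer size */
}

int main(void)
{
        unsigned long tx_packets = 0;
        struct skb_data entry;

        /* One URB carrying three aggregated frames. */
        set_tx_stats(&entry, 3, 0);

        /* Completion: credit all three frames, not just one per buffer. */
        tx_packets += entry.packets;

        printf("tx_packets=%lu\n", tx_packets);
        return 0;
}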
index 00048339c23e4f252ee6a4b15cd38b49b8032de4..b2dd371ec0ca0aa6f0e1dc71d1f79c69c6dbdb2b 100644 (file)
@@ -130,6 +130,7 @@ extern int vm_dirty_ratio;
 extern unsigned long vm_dirty_bytes;
 extern unsigned int dirty_writeback_interval;
 extern unsigned int dirty_expire_interval;
+extern unsigned int dirtytime_expire_interval;
 extern int vm_highmem_is_dirtyable;
 extern int block_dump;
 extern int laptop_mode;
@@ -146,6 +147,8 @@ extern int dirty_ratio_handler(struct ctl_table *table, int write,
 extern int dirty_bytes_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);
+int dirtytime_interval_handler(struct ctl_table *table, int write,
+                              void __user *buffer, size_t *lenp, loff_t *ppos);
 
 struct ctl_table;
 int dirty_writeback_centisecs_handler(struct ctl_table *, int,
index 534e1f2ac4fc36d5baeb7b3115dc95aaebd065d9..57639fca223a69823a48cecff03078387a917084 100644 (file)
@@ -79,6 +79,16 @@ void nf_log_packet(struct net *net,
                   const struct nf_loginfo *li,
                   const char *fmt, ...);
 
+__printf(8, 9)
+void nf_log_trace(struct net *net,
+                 u_int8_t pf,
+                 unsigned int hooknum,
+                 const struct sk_buff *skb,
+                 const struct net_device *in,
+                 const struct net_device *out,
+                 const struct nf_loginfo *li,
+                 const char *fmt, ...);
+
 struct nf_log_buf;
 
 struct nf_log_buf *nf_log_buf_open(void);
index 23d561512f64fe65a5165a23d4620f7d457504a4..22317d2b52abcbe194ef8e9964bf179ae7d32846 100644 (file)
@@ -7,27 +7,26 @@
 #include <linux/ktime.h>
 #include <linux/tracepoint.h>
 
-struct device;
-struct regmap;
+#include "../../../drivers/base/regmap/internal.h"
 
 /*
  * Log register events
  */
 DECLARE_EVENT_CLASS(regmap_reg,
 
-       TP_PROTO(struct device *dev, unsigned int reg,
+       TP_PROTO(struct regmap *map, unsigned int reg,
                 unsigned int val),
 
-       TP_ARGS(dev, reg, val),
+       TP_ARGS(map, reg, val),
 
        TP_STRUCT__entry(
-               __string(       name,           dev_name(dev)   )
-               __field(        unsigned int,   reg             )
-               __field(        unsigned int,   val             )
+               __string(       name,           regmap_name(map)        )
+               __field(        unsigned int,   reg                     )
+               __field(        unsigned int,   val                     )
        ),
 
        TP_fast_assign(
-               __assign_str(name, dev_name(dev));
+               __assign_str(name, regmap_name(map));
                __entry->reg = reg;
                __entry->val = val;
        ),
@@ -39,45 +38,45 @@ DECLARE_EVENT_CLASS(regmap_reg,
 
 DEFINE_EVENT(regmap_reg, regmap_reg_write,
 
-       TP_PROTO(struct device *dev, unsigned int reg,
+       TP_PROTO(struct regmap *map, unsigned int reg,
                 unsigned int val),
 
-       TP_ARGS(dev, reg, val)
+       TP_ARGS(map, reg, val)
 
 );
 
 DEFINE_EVENT(regmap_reg, regmap_reg_read,
 
-       TP_PROTO(struct device *dev, unsigned int reg,
+       TP_PROTO(struct regmap *map, unsigned int reg,
                 unsigned int val),
 
-       TP_ARGS(dev, reg, val)
+       TP_ARGS(map, reg, val)
 
 );
 
 DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
 
-       TP_PROTO(struct device *dev, unsigned int reg,
+       TP_PROTO(struct regmap *map, unsigned int reg,
                 unsigned int val),
 
-       TP_ARGS(dev, reg, val)
+       TP_ARGS(map, reg, val)
 
 );
 
 DECLARE_EVENT_CLASS(regmap_block,
 
-       TP_PROTO(struct device *dev, unsigned int reg, int count),
+       TP_PROTO(struct regmap *map, unsigned int reg, int count),
 
-       TP_ARGS(dev, reg, count),
+       TP_ARGS(map, reg, count),
 
        TP_STRUCT__entry(
-               __string(       name,           dev_name(dev)   )
-               __field(        unsigned int,   reg             )
-               __field(        int,            count           )
+               __string(       name,           regmap_name(map)        )
+               __field(        unsigned int,   reg                     )
+               __field(        int,            count                   )
        ),
 
        TP_fast_assign(
-               __assign_str(name, dev_name(dev));
+               __assign_str(name, regmap_name(map));
                __entry->reg = reg;
                __entry->count = count;
        ),
@@ -89,48 +88,48 @@ DECLARE_EVENT_CLASS(regmap_block,
 
 DEFINE_EVENT(regmap_block, regmap_hw_read_start,
 
-       TP_PROTO(struct device *dev, unsigned int reg, int count),
+       TP_PROTO(struct regmap *map, unsigned int reg, int count),
 
-       TP_ARGS(dev, reg, count)
+       TP_ARGS(map, reg, count)
 );
 
 DEFINE_EVENT(regmap_block, regmap_hw_read_done,
 
-       TP_PROTO(struct device *dev, unsigned int reg, int count),
+       TP_PROTO(struct regmap *map, unsigned int reg, int count),
 
-       TP_ARGS(dev, reg, count)
+       TP_ARGS(map, reg, count)
 );
 
 DEFINE_EVENT(regmap_block, regmap_hw_write_start,
 
-       TP_PROTO(struct device *dev, unsigned int reg, int count),
+       TP_PROTO(struct regmap *map, unsigned int reg, int count),
 
-       TP_ARGS(dev, reg, count)
+       TP_ARGS(map, reg, count)
 );
 
 DEFINE_EVENT(regmap_block, regmap_hw_write_done,
 
-       TP_PROTO(struct device *dev, unsigned int reg, int count),
+       TP_PROTO(struct regmap *map, unsigned int reg, int count),
 
-       TP_ARGS(dev, reg, count)
+       TP_ARGS(map, reg, count)
 );
 
 TRACE_EVENT(regcache_sync,
 
-       TP_PROTO(struct device *dev, const char *type,
+       TP_PROTO(struct regmap *map, const char *type,
                 const char *status),
 
-       TP_ARGS(dev, type, status),
+       TP_ARGS(map, type, status),
 
        TP_STRUCT__entry(
-               __string(       name,           dev_name(dev)   )
-               __string(       status,         status          )
-               __string(       type,           type            )
-               __field(        int,            type            )
+               __string(       name,           regmap_name(map)        )
+               __string(       status,         status                  )
+               __string(       type,           type                    )
+               __field(        int,            type                    )
        ),
 
        TP_fast_assign(
-               __assign_str(name, dev_name(dev));
+               __assign_str(name, regmap_name(map));
                __assign_str(status, status);
                __assign_str(type, type);
        ),
@@ -141,17 +140,17 @@ TRACE_EVENT(regcache_sync,
 
 DECLARE_EVENT_CLASS(regmap_bool,
 
-       TP_PROTO(struct device *dev, bool flag),
+       TP_PROTO(struct regmap *map, bool flag),
 
-       TP_ARGS(dev, flag),
+       TP_ARGS(map, flag),
 
        TP_STRUCT__entry(
-               __string(       name,           dev_name(dev)   )
-               __field(        int,            flag            )
+               __string(       name,           regmap_name(map)        )
+               __field(        int,            flag                    )
        ),
 
        TP_fast_assign(
-               __assign_str(name, dev_name(dev));
+               __assign_str(name, regmap_name(map));
                __entry->flag = flag;
        ),
 
@@ -161,32 +160,32 @@ DECLARE_EVENT_CLASS(regmap_bool,
 
 DEFINE_EVENT(regmap_bool, regmap_cache_only,
 
-       TP_PROTO(struct device *dev, bool flag),
+       TP_PROTO(struct regmap *map, bool flag),
 
-       TP_ARGS(dev, flag)
+       TP_ARGS(map, flag)
 
 );
 
 DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
 
-       TP_PROTO(struct device *dev, bool flag),
+       TP_PROTO(struct regmap *map, bool flag),
 
-       TP_ARGS(dev, flag)
+       TP_ARGS(map, flag)
 
 );
 
 DECLARE_EVENT_CLASS(regmap_async,
 
-       TP_PROTO(struct device *dev),
+       TP_PROTO(struct regmap *map),
 
-       TP_ARGS(dev),
+       TP_ARGS(map),
 
        TP_STRUCT__entry(
-               __string(       name,           dev_name(dev)   )
+               __string(       name,           regmap_name(map)        )
        ),
 
        TP_fast_assign(
-               __assign_str(name, dev_name(dev));
+               __assign_str(name, regmap_name(map));
        ),
 
        TP_printk("%s", __get_str(name))
@@ -194,50 +193,50 @@ DECLARE_EVENT_CLASS(regmap_async,
 
 DEFINE_EVENT(regmap_block, regmap_async_write_start,
 
-       TP_PROTO(struct device *dev, unsigned int reg, int count),
+       TP_PROTO(struct regmap *map, unsigned int reg, int count),
 
-       TP_ARGS(dev, reg, count)
+       TP_ARGS(map, reg, count)
 );
 
 DEFINE_EVENT(regmap_async, regmap_async_io_complete,
 
-       TP_PROTO(struct device *dev),
+       TP_PROTO(struct regmap *map),
 
-       TP_ARGS(dev)
+       TP_ARGS(map)
 
 );
 
 DEFINE_EVENT(regmap_async, regmap_async_complete_start,
 
-       TP_PROTO(struct device *dev),
+       TP_PROTO(struct regmap *map),
 
-       TP_ARGS(dev)
+       TP_ARGS(map)
 
 );
 
 DEFINE_EVENT(regmap_async, regmap_async_complete_done,
 
-       TP_PROTO(struct device *dev),
+       TP_PROTO(struct regmap *map),
 
-       TP_ARGS(dev)
+       TP_ARGS(map)
 
 );
 
 TRACE_EVENT(regcache_drop_region,
 
-       TP_PROTO(struct device *dev, unsigned int from,
+       TP_PROTO(struct regmap *map, unsigned int from,
                 unsigned int to),
 
-       TP_ARGS(dev, from, to),
+       TP_ARGS(map, from, to),
 
        TP_STRUCT__entry(
-               __string(       name,           dev_name(dev)   )
-               __field(        unsigned int,   from            )
-               __field(        unsigned int,   to              )
+               __string(       name,           regmap_name(map)        )
+               __field(        unsigned int,   from                    )
+               __field(        unsigned int,   to                      )
        ),
 
        TP_fast_assign(
-               __assign_str(name, dev_name(dev));
+               __assign_str(name, regmap_name(map));
                __entry->from = from;
                __entry->to = to;
        ),
index b0a81307985282005ade90ee6da96e3048c405d3..2f62ab2d7bf99bd75b16eef6db74ce8aa233e8ff 100644 (file)
@@ -973,7 +973,8 @@ struct input_keymap_entry {
  */
 #define MT_TOOL_FINGER         0
 #define MT_TOOL_PEN            1
-#define MT_TOOL_MAX            1
+#define MT_TOOL_PALM           2
+#define MT_TOOL_MAX            2
 
 /*
  * Values describing the status of a force-feedback effect
index 4742f2cb42f2bd46180421efef423f101579bd03..d3bd6ffec04101138e831076789fc11cee98f2be 100644 (file)
@@ -47,7 +47,7 @@
  * exported filesystem.
  */
 #define        NFSEXP_V4ROOT           0x10000
-#define NFSEXP_NOPNFS          0x20000
+#define NFSEXP_PNFS            0x20000
 
 /* All flags that we claim to support.  (Note we don't support NOACL.) */
 #define NFSEXP_ALLFLAGS                0x3FE7F
index 453ef61311d4cf069669fb449270a0700ae0f2f9..2fabc062716591e960dbab2c9cfc16a755895773 100644 (file)
@@ -4574,6 +4574,13 @@ static void perf_pending_event(struct irq_work *entry)
 {
        struct perf_event *event = container_of(entry,
                        struct perf_event, pending);
+       int rctx;
+
+       rctx = perf_swevent_get_recursion_context();
+       /*
+        * If we 'fail' here, that's OK, it means recursion is already disabled
+        * and we won't recurse 'further'.
+        */
 
        if (event->pending_disable) {
                event->pending_disable = 0;
@@ -4584,6 +4591,9 @@ static void perf_pending_event(struct irq_work *entry)
                event->pending_wakeup = 0;
                perf_event_wakeup(event);
        }
+
+       if (rctx >= 0)
+               perf_swevent_put_recursion_context(rctx);
 }
 
 /*
index 88d0d4420ad2e3e47e71129d361b153c96a49e18..ba77ab5f64dd9809f5e24f1b079ac5abbefeb495 100644 (file)
@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class)
        if (!new_class->name)
                return 0;
 
-       list_for_each_entry(class, &all_lock_classes, lock_entry) {
+       list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
                if (new_class->key - new_class->subclass == class->key)
                        return class->name_version;
                if (class->name && !strcmp(class->name, new_class->name))
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
        hash_head = classhashentry(key);
 
        /*
-        * We can walk the hash lockfree, because the hash only
-        * grows, and we are careful when adding entries to the end:
+        * We do an RCU walk of the hash, see lockdep_free_key_range().
         */
-       list_for_each_entry(class, hash_head, hash_entry) {
+       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+               return NULL;
+
+       list_for_each_entry_rcu(class, hash_head, hash_entry) {
                if (class->key == key) {
                        /*
                         * Huh! same key, different name? Did someone trample
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
        struct lockdep_subclass_key *key;
        struct list_head *hash_head;
        struct lock_class *class;
-       unsigned long flags;
+
+       DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
        class = look_up_lock_class(lock, subclass);
        if (likely(class))
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
        key = lock->key->subkeys + subclass;
        hash_head = classhashentry(key);
 
-       raw_local_irq_save(flags);
        if (!graph_lock()) {
-               raw_local_irq_restore(flags);
                return NULL;
        }
        /*
         * We have to do the hash-walk again, to avoid races
         * with another CPU:
         */
-       list_for_each_entry(class, hash_head, hash_entry)
+       list_for_each_entry_rcu(class, hash_head, hash_entry) {
                if (class->key == key)
                        goto out_unlock_set;
+       }
+
        /*
         * Allocate a new key from the static array, and add it to
         * the hash:
         */
        if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
                if (!debug_locks_off_graph_unlock()) {
-                       raw_local_irq_restore(flags);
                        return NULL;
                }
-               raw_local_irq_restore(flags);
 
                print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
                dump_stack();
@@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
        if (verbose(class)) {
                graph_unlock();
-               raw_local_irq_restore(flags);
 
                printk("\nnew class %p: %s", class->key, class->name);
                if (class->name_version > 1)
@@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
                printk("\n");
                dump_stack();
 
-               raw_local_irq_save(flags);
                if (!graph_lock()) {
-                       raw_local_irq_restore(flags);
                        return NULL;
                }
        }
 out_unlock_set:
        graph_unlock();
-       raw_local_irq_restore(flags);
 
 out_set_class_cache:
        if (!subclass || force)
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
        entry->distance = distance;
        entry->trace = *trace;
        /*
-        * Since we never remove from the dependency list, the list can
-        * be walked lockless by other CPUs, it's only allocation
-        * that must be protected by the spinlock. But this also means
-        * we must make new entries visible only once writes to the
-        * entry become visible - hence the RCU op:
+        * Both allocation and removal are done under the graph lock; but
+        * iteration is under RCU-sched; see look_up_lock_class() and
+        * lockdep_free_key_range().
         */
        list_add_tail_rcu(&entry->entry, head);
 
@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry,
                else
                        head = &lock->class->locks_before;
 
-               list_for_each_entry(entry, head, entry) {
+               DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+               list_for_each_entry_rcu(entry, head, entry) {
                        if (!lock_accessed(entry)) {
                                unsigned int cq_depth;
                                mark_lock_accessed(entry, lock);
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
         * We can walk it lock-free, because entries only get added
         * to the hash:
         */
-       list_for_each_entry(chain, hash_head, entry) {
+       list_for_each_entry_rcu(chain, hash_head, entry) {
                if (chain->chain_key == chain_key) {
 cache_hit:
                        debug_atomic_inc(chain_lookup_hits);
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
        if (unlikely(!debug_locks))
                return;
 
-       if (subclass)
+       if (subclass) {
+               unsigned long flags;
+
+               if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+                       return;
+
+               raw_local_irq_save(flags);
+               current->lockdep_recursion = 1;
                register_lock_class(lock, subclass, 1);
+               current->lockdep_recursion = 0;
+               raw_local_irq_restore(flags);
+       }
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size)
        return addr >= start && addr < start + size;
 }
 
+/*
+ * Used in module.c to remove lock classes from memory that is going to be
+ * freed; and possibly re-used by other modules.
+ *
+ * We will have had one sync_sched() before getting here, so we're guaranteed
+ * nobody will look up these exact classes -- they're properly dead but still
+ * allocated.
+ */
 void lockdep_free_key_range(void *start, unsigned long size)
 {
-       struct lock_class *class, *next;
+       struct lock_class *class;
        struct list_head *head;
        unsigned long flags;
        int i;
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
                head = classhash_table + i;
                if (list_empty(head))
                        continue;
-               list_for_each_entry_safe(class, next, head, hash_entry) {
+               list_for_each_entry_rcu(class, head, hash_entry) {
                        if (within(class->key, start, size))
                                zap_class(class);
                        else if (within(class->name, start, size))
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size)
        if (locked)
                graph_unlock();
        raw_local_irq_restore(flags);
+
+       /*
+        * Wait for any possible iterators from look_up_lock_class() to pass
+        * before continuing to free the memory they refer to.
+        *
+        * sync_sched() is sufficient because the read-side is IRQ-disabled.
+        */
+       synchronize_sched();
+
+       /*
+        * XXX at this point we could return the resources to the pool;
+        * instead we leak them. We would need to change to bitmap allocators
+        * instead of the linear allocators we have now.
+        */
 }
 
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
-       struct lock_class *class, *next;
+       struct lock_class *class;
        struct list_head *head;
        unsigned long flags;
        int i, j;
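The lockdep conversion above relies on the fact that a region with interrupts disabled is an RCU-sched read-side critical section. A schematic of the ordering (not compilable on its own; the names follow the hunks above):

        /* reader, e.g. look_up_lock_class(): IRQs off act as rcu_read_lock_sched() */
        raw_local_irq_save(flags);
        /* list_for_each_entry_rcu() walk of the class hash */
        raw_local_irq_restore(flags);

        /* writer, lockdep_free_key_range(): zap the classes under graph_lock(), then */
        synchronize_sched();    /* waits for every IRQs-off walker above to finish */
        /* only after this may the module memory holding the keys be reused */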
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
                head = classhash_table + i;
                if (list_empty(head))
                        continue;
-               list_for_each_entry_safe(class, next, head, hash_entry) {
+               list_for_each_entry_rcu(class, head, hash_entry) {
                        int match = 0;
 
                        for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
index b3d634ed06c94f1b2ce986293d0cda84078d4a46..99fdf94efce80f432fc4ab203aeb2a918ad5c564 100644 (file)
@@ -1865,7 +1865,7 @@ static void free_module(struct module *mod)
        kfree(mod->args);
        percpu_modfree(mod);
 
-       /* Free lock-classes: */
+       /* Free lock-classes; relies on the preceding sync_rcu(). */
        lockdep_free_key_range(mod->module_core, mod->core_size);
 
        /* Finally, free the core (containing the module structure) */
@@ -3349,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
        module_bug_cleanup(mod);
        mutex_unlock(&module_mutex);
 
-       /* Free lock-classes: */
-       lockdep_free_key_range(mod->module_core, mod->core_size);
-
        /* we can't deallocate the module until we clear memory protection */
        unset_module_init_ro_nx(mod);
        unset_module_core_ro_nx(mod);
@@ -3375,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
        synchronize_rcu();
        mutex_unlock(&module_mutex);
  free_module:
+       /* Free lock-classes; relies on the preceding sync_rcu() */
+       lockdep_free_key_range(mod->module_core, mod->core_size);
+
        module_deallocate(mod, info);
  free_copy:
        free_copy(info);
index f0f831e8a345d835f4cb21bf899c50ab67042b43..62671f53202ac7d4de8037dce950c934c7a4ddbc 100644 (file)
@@ -3034,6 +3034,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        } else {
                if (dl_prio(oldprio))
                        p->dl.dl_boosted = 0;
+               if (rt_prio(oldprio))
+                       p->rt.timeout = 0;
                p->sched_class = &fair_sched_class;
        }
 
index 7ce18f3c097ac4779eb4cf6ed0ad14ac1beb3eb5..bcfe32088b3768363c2f37502a953b61a361f7ff 100644 (file)
@@ -1609,9 +1609,11 @@ static void update_task_scan_period(struct task_struct *p,
        /*
         * If there were no record hinting faults then either the task is
         * completely idle or all activity is areas that are not of interest
-        * to automatic numa balancing. Scan slower
+        * to automatic numa balancing. Related to that, if there were failed
+        * migrations then it implies we are migrating too quickly or the local
+        * node is overloaded. In either case, scan slower.
         */
-       if (local + shared == 0) {
+       if (local + shared == 0 || p->numa_faults_locality[2]) {
                p->numa_scan_period = min(p->numa_scan_period_max,
                        p->numa_scan_period << 1);
 
@@ -2080,6 +2082,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
        if (migrated)
                p->numa_pages_migrated += pages;
+       if (flags & TNF_MIGRATE_FAIL)
+               p->numa_faults_locality[2] += pages;
 
        p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
        p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
index 88ea2d6e00314059b96adb0505ffc5f9c98fcf73..ce410bb9f2e103e0fcfda7d7b844948a0a28fbce 100644 (file)
@@ -1227,6 +1227,14 @@ static struct ctl_table vm_table[] = {
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &zero,
        },
+       {
+               .procname       = "dirtytime_expire_seconds",
+               .data           = &dirtytime_expire_interval,
+               .maxlen         = sizeof(dirty_expire_interval),
+               .mode           = 0644,
+               .proc_handler   = dirtytime_interval_handler,
+               .extra1         = &zero,
+       },
        {
                .procname       = "nr_pdflush_threads",
                .mode           = 0444 /* read-only */,
index eb682d5c697cd5b67c988654915d1f5333fbe342..6aac4beedbbe235951c0671336e52b2459a047fb 100644 (file)
@@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode,
  */
 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 {
+       int bc_moved;
        /*
         * We try to cancel the timer first. If the callback is on
         * flight on some other cpu then we let it handle it. If we
@@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
         * restart the timer because we are in the callback, but we
         * can set the expiry time and let the callback return
         * HRTIMER_RESTART.
+        *
+        * Since we are in the idle loop at this point and because
+        * hrtimer_{start/cancel} functions call into tracing,
+        * calls to these functions must be wrapped in RCU_NONIDLE().
         */
-       if (hrtimer_try_to_cancel(&bctimer) >= 0) {
-               hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
+       RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ?
+               !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) :
+                       0);
+       if (bc_moved) {
                /* Bind the "device" to the cpu */
                bc->bound_on = smp_processor_id();
        } else if (bc->bound_on == smp_processor_id()) {
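RCU_NONIDLE() runs its argument with RCU temporarily told that this CPU is not idle, which is what makes the tracepoints inside hrtimer_try_to_cancel()/hrtimer_start() safe from the idle path. A minimal sketch of the pattern (trace_my_event() and arg are hypothetical, not part of this patch):

        /* called from the idle loop: any code that can hit an
         * RCU-using tracepoint must be wrapped */
        RCU_NONIDLE(trace_my_event(arg));

As the hunk above shows, a whole expression, including the assignment and the ternary around the cancel/start pair, can be wrapped in one go.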
index e97dbd51e7569f6a7ba273227752f2cdbcaebb49..03d7fcb420b5d60c564ad10935011ed8a6556b69 100644 (file)
--- a/lib/lcm.c
+++ b/lib/lcm.c
@@ -12,3 +12,14 @@ unsigned long lcm(unsigned long a, unsigned long b)
                return 0;
 }
 EXPORT_SYMBOL_GPL(lcm);
+
+unsigned long lcm_not_zero(unsigned long a, unsigned long b)
+{
+       unsigned long l = lcm(a, b);
+
+       if (l)
+               return l;
+
+       return (b ? : a);
+}
+EXPORT_SYMBOL_GPL(lcm_not_zero);
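lcm_not_zero() keeps a usable granularity when one operand is zero, where plain lcm() would collapse to 0. A self-contained userspace sketch of the semantics (the *_demo names and the main() harness are illustrative only):

        #include <stdio.h>

        static unsigned long gcd_demo(unsigned long a, unsigned long b)
        {
                while (b) {
                        unsigned long t = a % b;

                        a = b;
                        b = t;
                }
                return a;
        }

        static unsigned long lcm_demo(unsigned long a, unsigned long b)
        {
                if (a && b)
                        return (a / gcd_demo(a, b)) * b;
                return 0;
        }

        static unsigned long lcm_not_zero_demo(unsigned long a, unsigned long b)
        {
                unsigned long l = lcm_demo(a, b);

                return l ? l : (b ? b : a);
        }

        int main(void)
        {
                printf("%lu\n", lcm_demo(0, 512));          /* 0: the old pitfall */
                printf("%lu\n", lcm_not_zero_demo(0, 512)); /* 512: zero side ignored */
                return 0;
        }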
index 76a1b59523ab05907403f4f9bd5dc547fca1bcab..f5907d23272d48562c69b911e5a0a619e3f4c180 100644 (file)
@@ -279,6 +279,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
        int minlen = min_t(int, count, nla_len(src));
 
        memcpy(dest, nla_data(src), minlen);
+       if (count > minlen)
+               memset(dest + minlen, 0, count - minlen);
 
        return minlen;
 }
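The added memset() ensures that when the attribute payload is shorter than the destination buffer, the tail is zero-filled instead of leaking whatever bytes were there before. A self-contained sketch of the copy-and-pad behaviour (the function and parameter names are illustrative, not the kernel API):

        #include <string.h>

        static int copy_and_pad(void *dest, const void *src, int src_len, int count)
        {
                int minlen = src_len < count ? src_len : count;

                memcpy(dest, src, minlen);
                if (count > minlen)
                        memset((char *)dest + minlen, 0, count - minlen);
                return minlen;
        }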
index 626e93db28ba162d11e7d286985604bbc523981c..6817b0350c71c43b0f89a4972ce19c51a2408a2b 100644 (file)
@@ -1260,6 +1260,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        int target_nid, last_cpupid = -1;
        bool page_locked;
        bool migrated = false;
+       bool was_writable;
        int flags = 0;
 
        /* A PROT_NONE fault should not end up here */
@@ -1291,17 +1292,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                flags |= TNF_FAULT_LOCAL;
        }
 
-       /*
-        * Avoid grouping on DSO/COW pages in specific and RO pages
-        * in general, RO pages shouldn't hurt as much anyway since
-        * they can be in shared cache state.
-        *
-        * FIXME! This checks "pmd_dirty()" as an approximation of
-        * "is this a read-only page", since checking "pmd_write()"
-        * is even more broken. We haven't actually turned this into
-        * a writable page, so pmd_write() will always be false.
-        */
-       if (!pmd_dirty(pmd))
+       /* See similar comment in do_numa_page for explanation */
+       if (!(vma->vm_flags & VM_WRITE))
                flags |= TNF_NO_GROUP;
 
        /*
@@ -1358,12 +1350,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (migrated) {
                flags |= TNF_MIGRATED;
                page_nid = target_nid;
-       }
+       } else
+               flags |= TNF_MIGRATE_FAIL;
 
        goto out;
 clear_pmdnuma:
        BUG_ON(!PageLocked(page));
+       was_writable = pmd_write(pmd);
        pmd = pmd_modify(pmd, vma->vm_page_prot);
+       pmd = pmd_mkyoung(pmd);
+       if (was_writable)
+               pmd = pmd_mkwrite(pmd);
        set_pmd_at(mm, haddr, pmdp, pmd);
        update_mmu_cache_pmd(vma, addr, pmdp);
        unlock_page(page);
@@ -1487,6 +1484,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
        if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                pmd_t entry;
+               bool preserve_write = prot_numa && pmd_write(*pmd);
                ret = 1;
 
                /*
@@ -1502,9 +1500,11 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                if (!prot_numa || !pmd_protnone(*pmd)) {
                        entry = pmdp_get_and_clear_notify(mm, addr, pmd);
                        entry = pmd_modify(entry, newprot);
+                       if (preserve_write)
+                               entry = pmd_mkwrite(entry);
                        ret = HPAGE_PMD_NR;
                        set_pmd_at(mm, addr, pmd, entry);
-                       BUG_ON(pmd_write(entry));
+                       BUG_ON(!preserve_write && pmd_write(entry));
                }
                spin_unlock(ptl);
        }
index 411144f977b10eab492410728784efe37c4ea54a..97839f5c8c303df324a1cec1dfacadb1b0bfa04c 100644 (file)
@@ -3035,6 +3035,7 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        int last_cpupid;
        int target_nid;
        bool migrated = false;
+       bool was_writable = pte_write(pte);
        int flags = 0;
 
        /* A PROT_NONE fault should not end up here */
@@ -3059,6 +3060,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        /* Make it present again */
        pte = pte_modify(pte, vma->vm_page_prot);
        pte = pte_mkyoung(pte);
+       if (was_writable)
+               pte = pte_mkwrite(pte);
        set_pte_at(mm, addr, ptep, pte);
        update_mmu_cache(vma, addr, ptep);
 
@@ -3069,16 +3072,14 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        /*
-        * Avoid grouping on DSO/COW pages in specific and RO pages
-        * in general, RO pages shouldn't hurt as much anyway since
-        * they can be in shared cache state.
-        *
-        * FIXME! This checks "pmd_dirty()" as an approximation of
-        * "is this a read-only page", since checking "pmd_write()"
-        * is even more broken. We haven't actually turned this into
-        * a writable page, so pmd_write() will always be false.
+        * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
+        * much anyway since they can be in shared cache state. This misses
+        * the case where a mapping is writable but the process never writes
+        * to it, but pte_write gets cleared during protection updates and
+        * pte_dirty has unpredictable behaviour between PTE scan updates,
+        * background writeback, dirty balancing and application behaviour.
         */
-       if (!pte_dirty(pte))
+       if (!(vma->vm_flags & VM_WRITE))
                flags |= TNF_NO_GROUP;
 
        /*
@@ -3102,7 +3103,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (migrated) {
                page_nid = target_nid;
                flags |= TNF_MIGRATED;
-       }
+       } else
+               flags |= TNF_MIGRATE_FAIL;
 
 out:
        if (page_nid != -1)
index 9fab10795beabd723c29722a442ace37c349c66b..65842d688b7c9bb5dbfd2440fc07339fa94650f4 100644 (file)
@@ -1092,6 +1092,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
                        return NULL;
 
                arch_refresh_nodedata(nid, pgdat);
+       } else {
+               /* Reset the nr_zones and classzone_idx to 0 before reuse */
+               pgdat->nr_zones = 0;
+               pgdat->classzone_idx = 0;
        }
 
        /* we can use NODE_DATA(nid) from here */
@@ -1977,15 +1981,6 @@ void try_offline_node(int nid)
                if (is_vmalloc_addr(zone->wait_table))
                        vfree(zone->wait_table);
        }
-
-       /*
-        * Since there is no way to guarentee the address of pgdat/zone is not
-        * on stack of any kernel threads or used by other kernel objects
-        * without reference counting or other symchronizing method, do not
-        * reset node_data and free pgdat here. Just reset it to 0 and reuse
-        * the memory when the node is online again.
-        */
-       memset(pgdat, 0, sizeof(*pgdat));
 }
 EXPORT_SYMBOL(try_offline_node);
 
index da9990acc08b2d8014e342771a52650b47ee05fc..9ec50a368634a8d9a0824504d479b03798bdc402 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -774,10 +774,8 @@ again:                     remove_next = 1 + (end > next->vm_end);
 
                        importer->anon_vma = exporter->anon_vma;
                        error = anon_vma_clone(importer, exporter);
-                       if (error) {
-                               importer->anon_vma = NULL;
+                       if (error)
                                return error;
-                       }
                }
        }
 
index 44727811bf4cf62e3579261ee9699a37fab78b3d..88584838e7046bec724d68c0cafcd94eec65a040 100644 (file)
@@ -75,6 +75,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;
+                       bool preserve_write = prot_numa && pte_write(oldpte);
 
                        /*
                         * Avoid trapping faults against the zero or KSM
@@ -94,6 +95,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
                        ptent = ptep_modify_prot_start(mm, addr, pte);
                        ptent = pte_modify(ptent, newprot);
+                       if (preserve_write)
+                               ptent = pte_mkwrite(ptent);
 
                        /* Avoid taking write faults for known dirty pages */
                        if (dirty_accountable && pte_dirty(ptent) &&
index 45e187b2d97183a90df9a5ee8558404f9f1bd826..644bcb665773f6e53f50fe6599429595b506f0c5 100644 (file)
@@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
         *                   bw * elapsed + write_bandwidth * (period - elapsed)
         * write_bandwidth = ---------------------------------------------------
         *                                          period
+        *
+        * @written may have decreased due to account_page_redirty().
+        * Avoid underflowing @bw calculation.
         */
-       bw = written - bdi->written_stamp;
+       bw = written - min(written, bdi->written_stamp);
        bw *= HZ;
        if (unlikely(elapsed > period)) {
                do_div(bw, elapsed);
@@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh,
                                    unsigned long now)
 {
        static DEFINE_SPINLOCK(dirty_lock);
-       static unsigned long update_time;
+       static unsigned long update_time = INITIAL_JIFFIES;
 
        /*
         * check locklessly first to optimize away locking for the most time
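The min() added in the write-bandwidth hunk above guards an unsigned subtraction: if account_page_redirty() pulled @written below the stamp, the old code wrapped around to an enormous bogus bandwidth. A self-contained illustration of the wrap (the values are made up):

        #include <stdio.h>

        int main(void)
        {
                unsigned long written = 100, stamp = 120;
                unsigned long bad  = written - stamp;   /* wraps to ~ULONG_MAX */
                unsigned long good = written - (written < stamp ? written : stamp); /* 0 */

                printf("bad=%lu good=%lu\n", bad, good);
                return 0;
        }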
index 72f5ac381ab3253b6016583e0618be6f3e91367c..755a42c76eb4747623da51acdeb780b322b5ac06 100644 (file)
@@ -103,6 +103,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 
                        if (!is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
+                               kernel_map_pages(page, (1 << order), 1);
                                set_page_refcounted(page);
                                isolated_page = page;
                        }
index 75c1f2878519171139ae2d0b6f1e24809067ab4a..29f2f8b853ae51be4f9e35fbc1495ad69297ff82 100644 (file)
@@ -265,8 +265,15 @@ int walk_page_range(unsigned long start, unsigned long end,
                        vma = vma->vm_next;
 
                        err = walk_page_test(start, next, walk);
-                       if (err > 0)
+                       if (err > 0) {
+                               /*
+                                * positive return values are purely for
+                                * controlling the pagewalk, so should never
+                                * be passed to the callers.
+                                */
+                               err = 0;
                                continue;
+                       }
                        if (err < 0)
                                break;
                }
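The positive return values being masked here come from a walker's ->test_walk() hook, which uses them to mean "skip this VMA" rather than "error". A hypothetical callback (the VM_PFNMAP condition is only an example, not taken from this patch):

        static int skip_pfnmap_vmas(unsigned long start, unsigned long end,
                                    struct mm_walk *walk)
        {
                if (walk->vma->vm_flags & VM_PFNMAP)
                        return 1;       /* skip the VMA; must not leak to the caller */
                return 0;
        }

With the fix, a caller of walk_page_range() only ever sees 0 or a negative error, never the internal skip value.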
index 5e3e09081164b83814683513c174445e67669a4b..c161a14b6a8fb127150678f18582ad6f0a29f55d 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -287,6 +287,13 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
        return 0;
 
  enomem_failure:
+       /*
+        * dst->anon_vma is dropped here, otherwise its degree can be incorrectly
+        * decremented in unlink_anon_vmas().
+        * We can safely do this because callers of anon_vma_clone() don't care
+        * about dst->anon_vma if anon_vma_clone() failed.
+        */
+       dst->anon_vma = NULL;
        unlink_anon_vmas(dst);
        return -ENOMEM;
 }
index 6832c4eab104d15ff3d3bd907c92591540facf01..82c473780c9188ecf7bfc393703f47793a2fbfe9 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2449,7 +2449,8 @@ redo:
        do {
                tid = this_cpu_read(s->cpu_slab->tid);
                c = raw_cpu_ptr(s->cpu_slab);
-       } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid));
+       } while (IS_ENABLED(CONFIG_PREEMPT) &&
+                unlikely(tid != READ_ONCE(c->tid)));
 
        /*
         * Irqless object alloc/free algorithm used here depends on sequence
@@ -2718,7 +2719,8 @@ redo:
        do {
                tid = this_cpu_read(s->cpu_slab->tid);
                c = raw_cpu_ptr(s->cpu_slab);
-       } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid));
+       } while (IS_ENABLED(CONFIG_PREEMPT) &&
+                unlikely(tid != READ_ONCE(c->tid)));
 
        /* Same with comment on barrier() in slab_alloc_node() */
        barrier();
index 94d3d5e978832cba85b7212988f735780ceed9dd..f7bd286a82807148bed32622e5de2f975450c877 100644 (file)
@@ -49,6 +49,13 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
            __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
            __get_user(kmsg->msg_flags, &umsg->msg_flags))
                return -EFAULT;
+
+       if (!uaddr)
+               kmsg->msg_namelen = 0;
+
+       if (kmsg->msg_namelen < 0)
+               return -EINVAL;
+
        if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
                kmsg->msg_namelen = sizeof(struct sockaddr_storage);
        kmsg->msg_control = compat_ptr(tmp3);
index cb5290b8c428c5c348b25d842ab9cf3797b70eba..5221f975a4cc313bd622ecbe9bba263a4faff4a1 100644 (file)
@@ -349,7 +349,7 @@ static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
 static void cleanup_net(struct work_struct *work)
 {
        const struct pernet_operations *ops;
-       struct net *net, *tmp;
+       struct net *net, *tmp, *peer;
        struct list_head net_kill_list;
        LIST_HEAD(net_exit_list);
 
@@ -365,14 +365,6 @@ static void cleanup_net(struct work_struct *work)
        list_for_each_entry(net, &net_kill_list, cleanup_list) {
                list_del_rcu(&net->list);
                list_add_tail(&net->exit_list, &net_exit_list);
-               for_each_net(tmp) {
-                       int id = __peernet2id(tmp, net, false);
-
-                       if (id >= 0)
-                               idr_remove(&tmp->netns_ids, id);
-               }
-               idr_destroy(&net->netns_ids);
-
        }
        rtnl_unlock();
 
@@ -398,12 +390,26 @@ static void cleanup_net(struct work_struct *work)
         */
        rcu_barrier();
 
+       rtnl_lock();
        /* Finally it is safe to free my network namespace structure */
        list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
+               /* Unreference net from all peers (no need to loop over
+                * net_exit_list because idr_destroy() will be called for each
+                * element of this list).
+                */
+               for_each_net(peer) {
+                       int id = __peernet2id(peer, net, false);
+
+                       if (id >= 0)
+                               idr_remove(&peer->netns_ids, id);
+               }
+               idr_destroy(&net->netns_ids);
+
                list_del_init(&net->exit_list);
                put_user_ns(net->user_ns);
                net_drop_ns(net);
        }
+       rtnl_unlock();
 }
 static DECLARE_WORK(net_cleanup_work, cleanup_net);
 
index ee0608bb3bc087212d12bac729677bf454053560..7ebed55b5f7d1b2d1faacea1a49e7f9e947f43d1 100644 (file)
@@ -1932,10 +1932,10 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
                struct ifinfomsg *ifm,
                struct nlattr **tb)
 {
-       struct net_device *dev;
+       struct net_device *dev, *aux;
        int err;
 
-       for_each_netdev(net, dev) {
+       for_each_netdev_safe(net, dev, aux) {
                if (dev->group == group) {
                        err = do_setlink(skb, dev, ifm, tb, NULL, 0);
                        if (err < 0)
index 9d78427652d23e33a46ab7ce2d4b6dbac1660781..92825443fad6ea0f063fedfaa176761717fb739e 100644 (file)
@@ -268,7 +268,7 @@ static int __net_init ipmr_rules_init(struct net *net)
        return 0;
 
 err2:
-       kfree(mrt);
+       ipmr_free_table(mrt);
 err1:
        fib_rules_unregister(ops);
        return err;
index 99e810f84671bbdb80e33c6915cc7be49ba0f1bb..cf5e82f39d3b87d7f8163320bffbc26af38d6f98 100644 (file)
@@ -272,9 +272,9 @@ static void trace_packet(const struct sk_buff *skb,
                    &chainname, &comment, &rulenum) != 0)
                        break;
 
-       nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo,
-                     "TRACE: %s:%s:%s:%u ",
-                     tablename, chainname, comment, rulenum);
+       nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
+                    "TRACE: %s:%s:%s:%u ",
+                    tablename, chainname, comment, rulenum);
 }
 #endif
 
index 5a2dfed4783b6ed0185dccded960972b4d6e13b0..f1756ee022078d12e74be5d245fc1a6a0c1c4a34 100644 (file)
@@ -1518,7 +1518,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
                skb->sk = sk;
                skb->destructor = sock_edemux;
                if (sk->sk_state != TCP_TIME_WAIT) {
-                       struct dst_entry *dst = sk->sk_rx_dst;
+                       struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
 
                        if (dst)
                                dst = dst_check(dst, 0);
index a2a796c5536b032264e2a71f596f673e8307f25c..1db253e36045ac038d7449a06d312275535a8014 100644 (file)
@@ -2773,15 +2773,11 @@ void tcp_send_fin(struct sock *sk)
        } else {
                /* Socket is locked, keep trying until memory is available. */
                for (;;) {
-                       skb = alloc_skb_fclone(MAX_TCP_HEADER,
-                                              sk->sk_allocation);
+                       skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
                        if (skb)
                                break;
                        yield();
                }
-
-               /* Reserve space for headers and prepare control bits. */
-               skb_reserve(skb, MAX_TCP_HEADER);
                /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
                tcp_init_nondata_skb(skb, tp->write_seq,
                                     TCPHDR_ACK | TCPHDR_FIN);
index b4d5e1d97c1b2576fc02a15b7a05a773197bb8e0..27ca79682efbf681a0ab6073f50f8fa73214028e 100644 (file)
@@ -104,6 +104,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
                                goto again;
                        flp6->saddr = saddr;
                }
+               err = rt->dst.error;
                goto out;
        }
 again:
index 34b682617f504359cecff4447c6015f90623e949..52028f449a892d9457314799e6767eef5a401824 100644 (file)
@@ -252,7 +252,7 @@ static int __net_init ip6mr_rules_init(struct net *net)
        return 0;
 
 err2:
-       kfree(mrt);
+       ip6mr_free_table(mrt);
 err1:
        fib_rules_unregister(ops);
        return err;
index 471ed24aabaec4b1d8736438696ca3490d4e0f58..14ecdaf06bf7497dc71199fc5638b49592a24655 100644 (file)
@@ -1218,7 +1218,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
        if (rt)
                rt6_set_expires(rt, jiffies + (HZ * lifetime));
        if (ra_msg->icmph.icmp6_hop_limit) {
-               in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+               /* Only set hop_limit on the interface if it is higher than
+                * the current hop_limit.
+                */
+               if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
+                       in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+               } else {
+                       ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+               }
                if (rt)
                        dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
                                       ra_msg->icmph.icmp6_hop_limit);
index e080fbbbc0e5ce8d4d71014eed149e6b34250b62..bb00c6f2a8855fb72dcc6a1bc5b496e8216d683f 100644 (file)
@@ -298,9 +298,9 @@ static void trace_packet(const struct sk_buff *skb,
                    &chainname, &comment, &rulenum) != 0)
                        break;
 
-       nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
-                     "TRACE: %s:%s:%s:%u ",
-                     tablename, chainname, comment, rulenum);
+       nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
+                    "TRACE: %s:%s:%s:%u ",
+                    tablename, chainname, comment, rulenum);
 }
 #endif
 
index 5d46832c6f72b89a278a3326918a3c8bff9afed4..1f5e62229aaa8b4d5822f82c3af3a6c8b1382ba6 100644 (file)
@@ -1411,6 +1411,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
        TCP_SKB_CB(skb)->sacked = 0;
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+       /* We need to move header back to the beginning if xfrm6_policy_check()
+        * and tcp_v6_fill_cb() are going to be called again.
+        */
+       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+               sizeof(struct inet6_skb_parm));
+}
+
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
        const struct tcphdr *th;
@@ -1543,6 +1552,7 @@ do_time_wait:
                        inet_twsk_deschedule(tw, &tcp_death_row);
                        inet_twsk_put(tw);
                        sk = sk2;
+                       tcp_v6_restore_cb(skb);
                        goto process;
                }
                /* Fall through to ACK */
@@ -1551,6 +1561,7 @@ do_time_wait:
                tcp_v6_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
+               tcp_v6_restore_cb(skb);
                goto no_tcp_socket;
        case TCP_TW_SUCCESS:
                ;
@@ -1585,7 +1596,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
                skb->sk = sk;
                skb->destructor = sock_edemux;
                if (sk->sk_state != TCP_TIME_WAIT) {
-                       struct dst_entry *dst = sk->sk_rx_dst;
+                       struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
 
                        if (dst)
                                dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
index ab889bb16b3cb077d26ddd2837b8b2eaaf1de666..be2c0ba82c8525ca466468ae45b05df5b35f85b6 100644 (file)
@@ -112,11 +112,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
                fptr->nexthdr = nexthdr;
                fptr->reserved = 0;
-               if (skb_shinfo(skb)->ip6_frag_id)
-                       fptr->identification = skb_shinfo(skb)->ip6_frag_id;
-               else
-                       ipv6_select_ident(fptr,
-                                         (struct rt6_info *)skb_dst(skb));
+               if (!skb_shinfo(skb)->ip6_frag_id)
+                       ipv6_proxy_select_ident(skb);
+               fptr->identification = skb_shinfo(skb)->ip6_frag_id;
 
                /* Fragment the skb. ipv6 header and the remaining fields of the
                 * fragment header are updated in ipv6_gso_segment()
index 2e9953b2db8402dd71c88691f263db00f2cba3e2..53d931172088b15b2890c42dd649308771b6e7d3 100644 (file)
@@ -1114,10 +1114,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                        noblock, &err);
        else
                skb = sock_alloc_send_skb(sk, len, noblock, &err);
-       if (!skb) {
-               err = -ENOMEM;
+       if (!skb)
                goto out;
-       }
        if (iucv->transport == AF_IUCV_TRANS_HIPER)
                skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
        if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
index a48bad468880aa60c80b45a1cc97bef616845ffa..7702978a4c999dfd10d9b49792b8b25899e23966 100644 (file)
@@ -49,8 +49,6 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
                container_of(h, struct tid_ampdu_rx, rcu_head);
        int i;
 
-       del_timer_sync(&tid_rx->reorder_timer);
-
        for (i = 0; i < tid_rx->buf_size; i++)
                __skb_queue_purge(&tid_rx->reorder_buf[i]);
        kfree(tid_rx->reorder_buf);
@@ -93,6 +91,12 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 
        del_timer_sync(&tid_rx->session_timer);
 
+       /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
+       spin_lock_bh(&tid_rx->reorder_lock);
+       tid_rx->removed = true;
+       spin_unlock_bh(&tid_rx->reorder_lock);
+       del_timer_sync(&tid_rx->reorder_timer);
+
        call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
 }
 
index 944bdc04e913d2f599b6c5845ee2549abba20a1c..1eb730bf875272831d44ac62c6e5a18e0a1de977 100644 (file)
@@ -873,9 +873,10 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 
  set_release_timer:
 
-               mod_timer(&tid_agg_rx->reorder_timer,
-                         tid_agg_rx->reorder_time[j] + 1 +
-                         HT_RX_REORDER_BUF_TIMEOUT);
+               if (!tid_agg_rx->removed)
+                       mod_timer(&tid_agg_rx->reorder_timer,
+                                 tid_agg_rx->reorder_time[j] + 1 +
+                                 HT_RX_REORDER_BUF_TIMEOUT);
        } else {
                del_timer(&tid_agg_rx->reorder_timer);
        }
index 925e68fe64c755766c2ecc30d047455f62c11362..fb0fc1302a588480cae6649e2e671ffa719de36b 100644 (file)
@@ -175,6 +175,7 @@ struct tid_ampdu_tx {
  * @reorder_lock: serializes access to reorder buffer, see below.
  * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_num
  *     and ssn.
+ * @removed: this session is removed (but might have been found due to RCU)
  *
  * This structure's lifetime is managed by RCU, assignments to
  * the array holding it must hold the aggregation mutex.
@@ -199,6 +200,7 @@ struct tid_ampdu_rx {
        u16 timeout;
        u8 dialog_token;
        bool auto_seq;
+       bool removed;
 };
 
 /**
index 0d8448f19dfe982f64879d9dc55ba7d746696196..675d12c69e325c70e46ca5e5522fb5f8c77b7d8b 100644 (file)
@@ -212,6 +212,30 @@ void nf_log_packet(struct net *net,
 }
 EXPORT_SYMBOL(nf_log_packet);
 
+void nf_log_trace(struct net *net,
+                 u_int8_t pf,
+                 unsigned int hooknum,
+                 const struct sk_buff *skb,
+                 const struct net_device *in,
+                 const struct net_device *out,
+                 const struct nf_loginfo *loginfo, const char *fmt, ...)
+{
+       va_list args;
+       char prefix[NF_LOG_PREFIXLEN];
+       const struct nf_logger *logger;
+
+       rcu_read_lock();
+       logger = rcu_dereference(net->nf.nf_loggers[pf]);
+       if (logger) {
+               va_start(args, fmt);
+               vsnprintf(prefix, sizeof(prefix), fmt, args);
+               va_end(args);
+               logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
+       }
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL(nf_log_trace);
+
 #define S_SIZE (1024 - (sizeof(unsigned int) + 1))
 
 struct nf_log_buf {
index 6ab777912237976ed72cdd34f18a7bea336f44ae..ac1a9528dbf2e4af0d33fec5667369d23ed179e4 100644 (file)
@@ -1225,7 +1225,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 
        if (nla[NFTA_CHAIN_POLICY]) {
                if ((chain != NULL &&
-                   !(chain->flags & NFT_BASE_CHAIN)) ||
+                   !(chain->flags & NFT_BASE_CHAIN)))
+                       return -EOPNOTSUPP;
+
+               if (chain == NULL &&
                    nla[NFTA_CHAIN_HOOK] == NULL)
                        return -EOPNOTSUPP;
 
index 3b90eb2b2c55453e989c891a3f815be6e1da22d1..2d298dccb6dd3fc5589be16021c843d741a4c025 100644 (file)
@@ -94,10 +94,10 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
 {
        struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
 
-       nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
-                     pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
-                     chain->table->name, chain->name, comments[type],
-                     rulenum);
+       nf_log_trace(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
+                    pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
+                    chain->table->name, chain->name, comments[type],
+                    rulenum);
 }
 
 unsigned int
index a5599fc51a6f3f87db4d63ecd20073481520a238..54330fb5efaf632d39d3e00135ac3360bdfdb80d 100644 (file)
@@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
        if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
                return -EINVAL;
 
+       /* Not all fields are initialized so first zero the tuple */
+       memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
+
        tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
        tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
 
index 213584cf04b34858164a07f99f1fc0b623c1ab44..65f3e2b6be44031448d85323ccf17cddaffd5557 100644 (file)
@@ -133,6 +133,9 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
                entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
                break;
        case AF_INET6:
+               if (proto)
+                       entry->e6.ipv6.flags |= IP6T_F_PROTO;
+
                entry->e6.ipv6.proto = proto;
                entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
                break;
@@ -344,6 +347,9 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
                entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
                break;
        case AF_INET6:
+               if (proto)
+                       entry->e6.ipv6.flags |= IP6T_F_PROTO;
+
                entry->e6.ipv6.proto = proto;
                entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
                break;
index c82df0a48fcd8a649921b3fcb6b5d1edd39c6295..37c15e6748841053df56fe092a49ced6fb06b077 100644 (file)
@@ -153,6 +153,8 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
                                iter->err = err;
                                goto out;
                        }
+
+                       continue;
                }
 
                if (iter->count < iter->skip)
index ef8a926752a97542f6f2f8eeb378e150958bff3d..50e1e5aaf4ce82ff7bbf1ee7171aaa51d54eefd1 100644 (file)
@@ -513,8 +513,8 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par)
 {
        const struct ip6t_ip6 *i = par->entryinfo;
 
-       if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP)
-           && !(i->flags & IP6T_INV_PROTO))
+       if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) &&
+           !(i->invflags & IP6T_INV_PROTO))
                return 0;
 
        pr_info("Can be used only in combination with "
index ec2954ffc690c612eb1b04b018134ba0f52ba5c8..067a3fff1d2cb0c629c1dc2d75d0353b9269ba71 100644 (file)
@@ -274,10 +274,8 @@ void ovs_vport_del(struct vport *vport)
        ASSERT_OVSL();
 
        hlist_del_rcu(&vport->hash_node);
-
-       vport->ops->destroy(vport);
-
        module_put(vport->ops->owner);
+       vport->ops->destroy(vport);
 }
 
 /**
index bbedbfcb42c2505fceb57fa058f262d90e1670ed..245330ca0015c2fd2548ead861d379714151c901 100644 (file)
@@ -1702,6 +1702,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
 
        if (len > INT_MAX)
                len = INT_MAX;
+       if (unlikely(!access_ok(VERIFY_READ, buff, len)))
+               return -EFAULT;
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                goto out;
@@ -1760,6 +1762,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
 
        if (size > INT_MAX)
                size = INT_MAX;
+       if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size)))
+               return -EFAULT;
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                goto out;
index 612aa73bbc60c990a320054e4bd7d2846575428e..e6ce1517367f884608640b2532080ab6566b9379 100644 (file)
@@ -303,9 +303,7 @@ static int rpc_client_register(struct rpc_clnt *clnt,
        struct super_block *pipefs_sb;
        int err;
 
-       err = rpc_clnt_debugfs_register(clnt);
-       if (err)
-               return err;
+       rpc_clnt_debugfs_register(clnt);
 
        pipefs_sb = rpc_get_sb_net(net);
        if (pipefs_sb) {
index e811f390f9f67ceb2e897ee8da79189417eacc75..82962f7e6e888f619ad79754f038732d5d5b6333 100644 (file)
@@ -129,48 +129,52 @@ static const struct file_operations tasks_fops = {
        .release        = tasks_release,
 };
 
-int
+void
 rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
 {
-       int len, err;
+       int len;
        char name[24]; /* enough for "../../rpc_xprt/ + 8 hex digits + NULL */
+       struct rpc_xprt *xprt;
 
        /* Already registered? */
-       if (clnt->cl_debugfs)
-               return 0;
+       if (clnt->cl_debugfs || !rpc_clnt_dir)
+               return;
 
        len = snprintf(name, sizeof(name), "%x", clnt->cl_clid);
        if (len >= sizeof(name))
-               return -EINVAL;
+               return;
 
        /* make the per-client dir */
        clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir);
        if (!clnt->cl_debugfs)
-               return -ENOMEM;
+               return;
 
        /* make tasks file */
-       err = -ENOMEM;
        if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs,
                                 clnt, &tasks_fops))
                goto out_err;
 
-       err = -EINVAL;
        rcu_read_lock();
+       xprt = rcu_dereference(clnt->cl_xprt);
+       /* no "debugfs" dentry? Don't bother with the symlink. */
+       if (!xprt->debugfs) {
+               rcu_read_unlock();
+               return;
+       }
        len = snprintf(name, sizeof(name), "../../rpc_xprt/%s",
-                       rcu_dereference(clnt->cl_xprt)->debugfs->d_name.name);
+                       xprt->debugfs->d_name.name);
        rcu_read_unlock();
+
        if (len >= sizeof(name))
                goto out_err;
 
-       err = -ENOMEM;
        if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name))
                goto out_err;
 
-       return 0;
+       return;
 out_err:
        debugfs_remove_recursive(clnt->cl_debugfs);
        clnt->cl_debugfs = NULL;
-       return err;
 }
 
 void
@@ -226,33 +230,33 @@ static const struct file_operations xprt_info_fops = {
        .release        = xprt_info_release,
 };
 
-int
+void
 rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
 {
        int len, id;
        static atomic_t cur_id;
        char            name[9]; /* 8 hex digits + NULL term */
 
+       if (!rpc_xprt_dir)
+               return;
+
        id = (unsigned int)atomic_inc_return(&cur_id);
 
        len = snprintf(name, sizeof(name), "%x", id);
        if (len >= sizeof(name))
-               return -EINVAL;
+               return;
 
        /* make the per-client dir */
        xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir);
        if (!xprt->debugfs)
-               return -ENOMEM;
+               return;
 
        /* make tasks file */
        if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs,
                                 xprt, &xprt_info_fops)) {
                debugfs_remove_recursive(xprt->debugfs);
                xprt->debugfs = NULL;
-               return -ENOMEM;
        }
-
-       return 0;
 }
 
 void
@@ -266,14 +270,17 @@ void __exit
 sunrpc_debugfs_exit(void)
 {
        debugfs_remove_recursive(topdir);
+       topdir = NULL;
+       rpc_clnt_dir = NULL;
+       rpc_xprt_dir = NULL;
 }
 
-int __init
+void __init
 sunrpc_debugfs_init(void)
 {
        topdir = debugfs_create_dir("sunrpc", NULL);
        if (!topdir)
-               goto out;
+               return;
 
        rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir);
        if (!rpc_clnt_dir)
@@ -283,10 +290,9 @@ sunrpc_debugfs_init(void)
        if (!rpc_xprt_dir)
                goto out_remove;
 
-       return 0;
+       return;
 out_remove:
        debugfs_remove_recursive(topdir);
        topdir = NULL;
-out:
-       return -ENOMEM;
+       rpc_clnt_dir = NULL;
 }
index e37fbed879568da535aa540656e7b7ace508e2cb..ee5d3d253102bf5d81a39f953248a6a6ca7a38d6 100644 (file)
@@ -98,10 +98,7 @@ init_sunrpc(void)
        if (err)
                goto out4;
 
-       err = sunrpc_debugfs_init();
-       if (err)
-               goto out5;
-
+       sunrpc_debugfs_init();
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        rpc_register_sysctl();
 #endif
@@ -109,8 +106,6 @@ init_sunrpc(void)
        init_socket_xprt();     /* clnt sock transport */
        return 0;
 
-out5:
-       unregister_rpc_pipefs();
 out4:
        unregister_pernet_subsys(&sunrpc_net_ops);
 out3:
index e3015aede0d9443d99eba6b820aed104ab7515a6..9949722d99cebf6afa15953d8a9ac6a5c0bc2824 100644 (file)
@@ -1331,7 +1331,6 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
  */
 struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
 {
-       int err;
        struct rpc_xprt *xprt;
        struct xprt_class *t;
 
@@ -1372,11 +1371,7 @@ found:
                return ERR_PTR(-ENOMEM);
        }
 
-       err = rpc_xprt_debugfs_register(xprt);
-       if (err) {
-               xprt_destroy(xprt);
-               return ERR_PTR(err);
-       }
+       rpc_xprt_debugfs_register(xprt);
 
        dprintk("RPC:       created transport %p with %u slots\n", xprt,
                        xprt->max_reqs);
index 935205e6bcfe6da614fcc6c67e77754dc3a484fa..be1c9fa60b09dc713155c94e7bf6bcc6366fc7aa 100644 (file)
@@ -152,11 +152,11 @@ out_netlink:
 static void __exit tipc_exit(void)
 {
        tipc_bearer_cleanup();
+       unregister_pernet_subsys(&tipc_net_ops);
        tipc_netlink_stop();
        tipc_netlink_compat_stop();
        tipc_socket_stop();
        tipc_unregister_sysctl();
-       unregister_pernet_subsys(&tipc_net_ops);
 
        pr_info("Deactivated\n");
 }
index 1684bcc78b34e42395b1db335c9122c405987c23..5fde34326dcf28312ab4c36cb09623ecb811760b 100644 (file)
@@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
                goto out;
 
        /* No partial writes. */
-       length = EINVAL;
+       length = -EINVAL;
        if (*ppos != 0)
                goto out;
 
index 4ca3d5d02436daf0ec7ec7274c675c4beae74423..a8a1e14272a1e574302da56ad0ad7540bf8fd60d 100644 (file)
@@ -1989,7 +1989,7 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Sunrise Point */
        { PCI_DEVICE(0x8086, 0xa170),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
        /* Sunrise Point-LP */
        { PCI_DEVICE(0x8086, 0x9d70),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
index 526398a4a4428da11d5792c9c7f2a299cac9bd43..74382137b9f5abcd67b9c4c44581c9a80f26d17d 100644 (file)
@@ -396,7 +396,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
 {
        /* We currently only handle front, HP */
        static hda_nid_t pins[] = {
-               0x0f, 0x10, 0x14, 0x15, 0
+               0x0f, 0x10, 0x14, 0x15, 0x17, 0
        };
        hda_nid_t *p;
        for (p = pins; *p; p++)
@@ -5036,6 +5036,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
index 4e511221a0c11cd53588fc2ae507848ab94339cb..0db571340edbd94a7042a63f517f410b42c127d7 100644 (file)
@@ -22,6 +22,14 @@ TARGETS += vm
 TARGETS_HOTPLUG = cpu-hotplug
 TARGETS_HOTPLUG += memory-hotplug
 
+# Clear LDFLAGS and MAKEFLAGS if called from main
+# Makefile to avoid test build failures when test
+# Makefile doesn't have explicit build rules.
+ifeq (1,$(MAKELEVEL))
+undefine LDFLAGS
+override MAKEFLAGS =
+endif
+
 all:
        for TARGET in $(TARGETS); do \
                make -C $$TARGET; \
index a2214d9609bda59c8e48e841823da4f459081d52..cc6a25d95fbff532bf5b00b0c339bec91ddc5bcf 100644 (file)
@@ -471,7 +471,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
        BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
 
        r = -ENOMEM;
-       kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+       kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots));
        if (!kvm->memslots)
                goto out_err_no_srcu;
 
@@ -522,7 +522,7 @@ out_err_no_srcu:
 out_err_no_disable:
        for (i = 0; i < KVM_NR_BUSES; i++)
                kfree(kvm->buses[i]);
-       kfree(kvm->memslots);
+       kvfree(kvm->memslots);
        kvm_arch_free_vm(kvm);
        return ERR_PTR(r);
 }
@@ -578,7 +578,7 @@ static void kvm_free_physmem(struct kvm *kvm)
        kvm_for_each_memslot(memslot, slots)
                kvm_free_physmem_slot(kvm, memslot, NULL);
 
-       kfree(kvm->memslots);
+       kvfree(kvm->memslots);
 }
 
 static void kvm_destroy_devices(struct kvm *kvm)
@@ -871,10 +871,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
                        goto out_free;
        }
 
-       slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
-                       GFP_KERNEL);
+       slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
        if (!slots)
                goto out_free;
+       memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 
        if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
                slot = id_to_memslot(slots, mem->slot);
@@ -917,7 +917,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
        kvm_arch_commit_memory_region(kvm, mem, &old, change);
 
        kvm_free_physmem_slot(kvm, &old, &new);
-       kfree(old_memslots);
+       kvfree(old_memslots);
 
        /*
         * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
@@ -936,7 +936,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
        return 0;
 
 out_slots:
-       kfree(slots);
+       kvfree(slots);
 out_free:
        kvm_free_physmem_slot(kvm, &new, &old);
 out:
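The memslot allocations now go through kvm_kvzalloc(), which in this file falls back to vzalloc() for sizes above a page, so every free site has to use kvfree(), which handles both kmalloc'd and vmalloc'd pointers. The pairing, schematically (error handling trimmed to the essentials):

        struct kvm_memslots *slots = kvm_kvzalloc(sizeof(struct kvm_memslots));

        if (!slots)
                return -ENOMEM;
        /* ... populate and publish slots ... */
        kvfree(slots);          /* correct whichever allocator was used */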