git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge tag 'pm+acpi-3.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael...
author    Linus Torvalds <torvalds@linux-foundation.org>
Sat, 1 Nov 2014 02:08:25 +0000 (19:08 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 1 Nov 2014 02:08:25 +0000 (19:08 -0700)
Pull ACPI and power management fixes from Rafael Wysocki:
 "These are fixes received after my previous pull request plus one that
  has been in the works for quite a while, but its previous version
  caused problems to happen, so it's been deferred till now.

  Fixed are two recent regressions (MFD enumeration and cpufreq-dt), an
  ACPI EC regression introduced in 3.17, a system suspend error code
  path regression introduced in 3.15, an older bug related to recovery
  from a failing resume from hibernation, and a cpufreq-dt driver issue
  related to operating performance points.

  Specifics:

   - Fix a crash on r8a7791/koelsch during resume from system suspend
     caused by a recent cpufreq-dt commit (Geert Uytterhoeven).

   - Fix an MFD enumeration problem introduced by a recent commit adding
     ACPI support to the MFD subsystem, which exposed a weakness in the
     ACPI core that caused ACPI enumeration to be applied to all devices
     associated with one ACPI companion object, although it should be
     used for only one of them (Mika Westerberg); a sketch of the
     "first physical device" check follows the shortlog below.

   - Fix an ACPI EC regression introduced during the 3.17 cycle causing
     some Samsung laptops to misbehave as a result of a workaround
     targeted at some Acer machines.  The fix includes a revert of a
     commit that went too far and a quirk for the Acer machines in
     question (Lv Zheng).

   - Fix a regression in the system suspend error code path introduced
     during the 3.15 cycle that causes it to fail to take errors from
     asynchronous execution of "late" suspend callbacks into account
     (Imre Deak).

   - Fix a long-standing bug in the hibernation resume error code path
     that fails to roll back everything correctly on "freeze" callback
     errors and leaves some devices in a "suspended" state causing more
     breakage to happen subsequently (Imre Deak).

   - Make the cpufreq-dt driver disable operating performance points
     (OPPs) that the voltage regulator (VR) connected to the CPU voltage
     plane cannot supply within an acceptable tolerance, instead of
     constantly failing voltage scaling later on (Lucas Stach); a sketch
     of this OPP filtering follows right after this message"
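
A minimal, hedged sketch of the cpufreq-dt OPP filtering described in the
last item above, assuming the 3.18-era OPP and regulator APIs; the helper
name and the tol_uV tolerance parameter are illustrative rather than taken
from the actual patch:

    /*
     * Sketch only: walk the OPP table and disable every OPP whose voltage
     * the CPU supply regulator cannot provide within +/- tol_uV.  The
     * 3.18-era OPP API requires the RCU read lock around OPP lookups.
     */
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/pm_opp.h>
    #include <linux/rcupdate.h>
    #include <linux/regulator/consumer.h>

    static void sketch_disable_unsupported_opps(struct device *cpu_dev,
                                                struct regulator *cpu_reg,
                                                unsigned long tol_uV)
    {
            struct dev_pm_opp *opp;
            unsigned long freq = 0, volt;

            for (;;) {
                    rcu_read_lock();
                    opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq);
                    if (IS_ERR(opp)) {
                            rcu_read_unlock();
                            break;
                    }
                    volt = dev_pm_opp_get_voltage(opp);
                    rcu_read_unlock();

                    /* Regulator cannot reach this voltage: drop the OPP. */
                    if (!regulator_is_supported_voltage(cpu_reg,
                                                        volt - tol_uV,
                                                        volt + tol_uV))
                            dev_pm_opp_disable(cpu_dev, freq);

                    freq++;         /* step past this OPP on the next lookup */
            }
    }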

* tag 'pm+acpi-3.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI / EC: Fix regression due to conflicting firmware behavior between Samsung and Acer.
  Revert "ACPI / EC: Add support to disallow QR_EC to be issued before completing previous QR_EC"
  cpufreq: cpufreq-dt: Restore default cpumask_setall(policy->cpus)
  PM / Sleep: fix recovery during resuming from hibernation
  PM / Sleep: fix async suspend_late/freeze_late error handling
  ACPI: Use ACPI companion to match only the first physical device
  cpufreq: cpufreq-dt: disable unsupported OPPs
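
For "ACPI: Use ACPI companion to match only the first physical device"
above, the underlying idea can be sketched as follows.  This is an
illustration only, not the actual patch: it assumes the 3.18-era
struct acpi_device layout (physical_node_lock plus a physical_node_list
of struct acpi_device_physical_node entries), and the helper name is
hypothetical.

    /*
     * Sketch only: treat a physical device as a valid ACPI match only if
     * it is the first entry on its companion's list of physical nodes,
     * so ACPI-driven enumeration is not repeated for every device that
     * shares the same companion object.
     */
    #include <linux/acpi.h>
    #include <linux/device.h>
    #include <linux/list.h>
    #include <linux/mutex.h>

    static bool sketch_is_first_physical_node(struct acpi_device *adev,
                                              struct device *dev)
    {
            struct acpi_device_physical_node *node;
            bool first = false;

            mutex_lock(&adev->physical_node_lock);
            node = list_first_entry_or_null(&adev->physical_node_list,
                                            struct acpi_device_physical_node,
                                            node);
            if (node && node->dev == dev)
                    first = true;
            mutex_unlock(&adev->physical_node_lock);

            return first;
    }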

397 files changed:
Documentation/ABI/testing/sysfs-ibft
Documentation/DocBook/media/Makefile
Documentation/DocBook/media/v4l/compat.xml
Documentation/HOWTO
Documentation/SubmittingPatches
Documentation/development-process/2.Process
Documentation/development-process/8.Conclusion
Documentation/devicetree/bindings/net/smsc-lan91c111.txt
Documentation/devicetree/bindings/sound/sgtl5000.txt
Documentation/devicetree/bindings/submitting-patches.txt
Documentation/kernel-parameters.txt
Documentation/kmemleak.txt
Documentation/prctl/Makefile
Documentation/ptp/testptp.mk [new file with mode: 0644]
Documentation/vDSO/Makefile
Documentation/vDSO/vdso_standalone_test_x86.c
Documentation/vm/hugetlbpage.txt
MAINTAINERS
arch/arm/boot/dts/omap3-n900.dts
arch/arm/kernel/asm-offsets.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mm/init.c
arch/microblaze/Kconfig
arch/microblaze/include/asm/unistd.h
arch/microblaze/include/uapi/asm/unistd.h
arch/microblaze/kernel/syscall_table.S
arch/microblaze/pci/pci-common.c
arch/powerpc/include/asm/hugetlb.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/mm/copro_fault.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/slice.c
arch/powerpc/perf/hv-24x7.c
arch/powerpc/perf/hv-gpci.c
arch/powerpc/platforms/powernv/opal-lpc.c
arch/powerpc/platforms/powernv/opal-wrappers.S
arch/s390/kernel/perf_cpum_sf.c
arch/sh/kernel/cpu/sh3/setup-sh770x.c
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/x86/Kconfig
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/preempt.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apb_timer.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/i8259.c
arch/x86/kernel/irqinit.c
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tsc.c
arch/x86/mm/pageattr.c
arch/x86/platform/intel-mid/sfi.c
block/blk-merge.c
block/elevator.c
block/scsi_ioctl.c
drivers/base/dma-contiguous.c
drivers/bcma/host_pci.c
drivers/bcma/main.c
drivers/block/null_blk.c
drivers/block/sunvdc.c
drivers/block/zram/zram_drv.c
drivers/clocksource/arm_arch_timer.c
drivers/edac/cpc925_edac.c
drivers/edac/e7xxx_edac.c
drivers/edac/i3200_edac.c
drivers/edac/i82860_edac.c
drivers/hid/hid-debug.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/usbhid/hid-quirks.c
drivers/infiniband/hw/mlx4/main.c
drivers/media/common/saa7146/saa7146_core.c
drivers/media/pci/cx23885/cx23885-dvb.c
drivers/media/pci/tw68/Kconfig
drivers/media/pci/tw68/tw68-core.c
drivers/media/platform/Kconfig
drivers/media/platform/exynos4-is/Kconfig
drivers/media/platform/exynos4-is/fimc-core.c
drivers/media/platform/s5p-jpeg/jpeg-core.c
drivers/media/platform/s5p-tv/Kconfig
drivers/media/platform/vivid/Kconfig
drivers/media/platform/vivid/vivid-tpg.c
drivers/media/radio/wl128x/fmdrv_common.c
drivers/media/tuners/xc5000.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/media/usb/dvb-usb-v2/anysee.c
drivers/media/usb/em28xx/em28xx-core.c
drivers/media/usb/em28xx/em28xx-input.c
drivers/media/usb/hackrf/hackrf.c
drivers/media/usb/usbvision/usbvision-video.c
drivers/media/usb/uvc/uvc_v4l2.c
drivers/media/usb/uvc/uvc_video.c
drivers/media/usb/uvc/uvcvideo.h
drivers/media/v4l2-core/videobuf-dma-contig.c
drivers/misc/cxl/fault.c
drivers/misc/cxl/native.c
drivers/net/Kconfig
drivers/net/dsa/mv88e6171.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/cisco/enic/enic_clsf.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fs_enet/mac-fec.c
drivers/net/ethernet/freescale/fs_enet/mac-scc.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smc91x.h
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/cpsw_ale.h
drivers/net/hyperv/netvsc_drv.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/phy/marvell.c
drivers/net/tun.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/r8152.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath9k/common.c
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/regd.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/11n_rxreorder.h
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/core.h
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
drivers/net/wireless/rtlwifi/rtl8192ce/def.h
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192de/sw.c
drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
drivers/net/wireless/rtlwifi/rtl8192se/def.h
drivers/net/wireless/rtlwifi/rtl8192se/sw.c
drivers/net/wireless/rtlwifi/rtl8192se/trx.c
drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
drivers/net/wireless/rtlwifi/usb.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/of/of_reserved_mem.c
drivers/pci/host/pci-imx6.c
drivers/pci/hotplug/pciehp_core.c
drivers/pci/pci-sysfs.c
drivers/regulator/rk808-regulator.c
drivers/rtc/Kconfig
drivers/rtc/rtc-bq32k.c
drivers/rtc/rtc-pm8xxx.c
drivers/rtc/rtc-s3c.c
drivers/spi/spi-dw.c
drivers/spi/spi-orion.c
drivers/spi/spi-pl022.c
drivers/spi/spi-rockchip.c
drivers/spi/spidev.c
drivers/video/console/fbcon.c
drivers/video/console/vgacon.c
drivers/video/fbdev/atmel_lcdfb.c
drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
drivers/video/fbdev/omap2/displays-new/connector-dvi.c
drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
drivers/video/fbdev/omap2/displays-new/panel-dpi.c
drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c
drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c
drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
drivers/video/fbdev/omap2/dss/apply.c
drivers/video/fbdev/omap2/dss/dispc.c
drivers/video/fbdev/omap2/dss/dispc.h
drivers/video/fbdev/omap2/dss/dpi.c
drivers/video/fbdev/omap2/dss/dsi.c
drivers/video/fbdev/omap2/dss/dss.c
drivers/video/fbdev/omap2/dss/hdmi4.c
drivers/video/fbdev/omap2/dss/hdmi5.c
drivers/video/fbdev/omap2/dss/hdmi_pll.c
drivers/video/fbdev/omap2/dss/rfbi.c
drivers/video/fbdev/omap2/dss/sdi.c
drivers/video/fbdev/omap2/dss/venc.c
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
fs/buffer.c
fs/ext3/super.c
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/ext4/super.c
fs/jbd/revoke.c
fs/jbd2/revoke.c
fs/namei.c
fs/nfsd/nfs4proc.c
fs/notify/inode_mark.c
fs/ocfs2/namei.c
fs/quota/dquot.c
include/linux/blkdev.h
include/linux/compiler-gcc4.h
include/linux/compiler-gcc5.h
include/linux/khugepaged.h
include/linux/memcontrol.h
include/linux/mm.h
include/linux/of_reserved_mem.h
include/linux/regulator/consumer.h
include/linux/skbuff.h
include/linux/usb/usbnet.h
include/net/ipv6.h
include/net/netfilter/ipv4/nf_reject.h
include/net/netfilter/ipv6/nf_reject.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nft_masq.h
include/trace/events/rcu.h
include/uapi/linux/input.h
include/uapi/linux/perf_event.h
include/uapi/linux/sched.h
include/uapi/linux/v4l2-dv-timings.h
init/Kconfig
kernel/Makefile
kernel/bpf/Makefile
kernel/bpf/core.c
kernel/bpf/verifier.c
kernel/context_tracking.c
kernel/cpu.c
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/futex.c
kernel/gcov/Kconfig
kernel/kmod.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sysctl.c
kernel/time/clockevents.c
kernel/time/posix-timers.c
kernel/trace/ftrace.c
kernel/trace/trace_syscalls.c
lib/bitmap.c
lib/scatterlist.c
mm/balloon_compaction.c
mm/compaction.c
mm/huge_memory.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mmap.c
mm/page-writeback.c
mm/page_cgroup.c
mm/rmap.c
mm/slab_common.c
net/Kconfig
net/bridge/br_forward.c
net/bridge/br_netfilter.c
net/bridge/netfilter/nf_tables_bridge.c
net/bridge/netfilter/nft_reject_bridge.c
net/core/dev.c
net/core/ethtool.c
net/core/skbuff.c
net/core/tso.c
net/dsa/dsa.c
net/ipv4/af_inet.c
net/ipv4/gre_offload.c
net/ipv4/inet_fragment.c
net/ipv4/ip_output.c
net/ipv4/netfilter/nf_reject_ipv4.c
net/ipv4/netfilter/nft_masq_ipv4.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp_offload.c
net/ipv6/addrconf.c
net/ipv6/ip6_offload.c
net/ipv6/netfilter/nf_reject_ipv6.c
net/ipv6/netfilter/nft_masq_ipv6.c
net/ipv6/output_core.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_policy.c
net/mac80211/cfg.c
net/mac80211/rate.c
net/mac80211/rc80211_minstrel_debugfs.c
net/mac80211/rc80211_minstrel_ht_debugfs.c
net/mac80211/sta_info.h
net/mpls/Makefile
net/mpls/mpls_gso.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netfilter/nft_compat.c
net/netfilter/nft_masq.c
net/netfilter/nft_nat.c
net/netlink/af_netlink.c
net/openvswitch/datapath.c
net/sched/sch_api.c
net/sched/sch_pie.c
net/tipc/node.c
net/tipc/node.h
net/tipc/socket.c
net/wireless/nl80211.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
samples/bpf/test_verifier.c
security/integrity/evm/evm_main.c
security/integrity/ima/ima_appraise.c
security/integrity/integrity.h
sound/core/pcm_compat.c
sound/firewire/bebob/bebob_focusrite.c
sound/firewire/bebob/bebob_stream.c
sound/firewire/bebob/bebob_terratec.c
sound/pci/ad1889.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/soc/Kconfig
sound/soc/Makefile
sound/soc/codecs/adau1761.c
sound/soc/fsl/fsl_asrc.c
sound/soc/fsl/fsl_esai.c
sound/soc/intel/sst-haswell-pcm.c
sound/soc/s6000/Kconfig [deleted file]
sound/soc/s6000/Makefile [deleted file]
sound/soc/s6000/s6000-i2s.c [deleted file]
sound/soc/s6000/s6000-i2s.h [deleted file]
sound/soc/s6000/s6000-pcm.c [deleted file]
sound/soc/s6000/s6000-pcm.h [deleted file]
sound/soc/s6000/s6105-ipcam.c [deleted file]
tools/perf/builtin-diff.c
tools/perf/builtin-probe.c
tools/perf/perf-sys.h
tools/perf/util/header.c
tools/perf/util/sort.c
tools/perf/util/thread.c
tools/perf/util/unwind-libunwind.c
tools/perf/util/unwind.h

index c2b7d1154bec62559790d619320eb5f75cbc4645..cac3930bdb04d991a64e4925dc2657ad55c2fbd6 100644 (file)
@@ -20,4 +20,4 @@ Date:         November 2007
 Contact:       Konrad Rzeszutek <ketuzsezr@darnok.org>
 Description:   The /sys/firmware/ibft/ethernetX directory will contain
                files that expose the iSCSI Boot Firmware Table NIC data.
-               This can this can the IP address, MAC, and gateway of the NIC.
+               Usually this contains the IP address, MAC, and gateway of the NIC.
index df2962d9e11e1f79e84779557c68bc73ea0f1670..8bf7c6191296f6790bd98836711e73d34deddb9a 100644 (file)
@@ -25,7 +25,7 @@ GENFILES := $(addprefix $(MEDIA_OBJ_DIR)/, $(MEDIA_TEMP))
 PHONY += cleanmediadocs
 
 cleanmediadocs:
-       -@rm `find $(MEDIA_OBJ_DIR) -type l` $(GENFILES) $(OBJIMGFILES) 2>/dev/null
+       -@rm -f `find $(MEDIA_OBJ_DIR) -type l` $(GENFILES) $(OBJIMGFILES) 2>/dev/null
 
 $(obj)/media_api.xml: $(GENFILES) FORCE
 
index 07ffc76553ba098490cbba874f6f513f8981222e..0a2debfa68f616fa0aa15afc53687edfe1484263 100644 (file)
@@ -2566,6 +2566,10 @@ fields changed from _s32 to _u32.
          <para>Added compound control types and &VIDIOC-QUERY-EXT-CTRL;.
          </para>
         </listitem>
+      </orderedlist>
+    </section>
+
+    <section>
       <title>V4L2 in Linux 3.18</title>
       <orderedlist>
        <listitem>
index 57cf5efb044da7f3c29586466455eaa1b55a77ae..93aa8604630e769dd531fe8856c59d6d7390e6cc 100644 (file)
@@ -324,7 +324,6 @@ tree, they need to be integration-tested.  For this purpose, a special
 testing repository exists into which virtually all subsystem trees are
 pulled on an almost daily basis:
        http://git.kernel.org/?p=linux/kernel/git/next/linux-next.git
-       http://linux.f-seidel.de/linux-next/pmwiki/
 
 This way, the -next kernel gives a summary outlook onto what will be
 expected to go into the mainline kernel at the next merge period.
index 482c74947de02939eebfca089a344373707bf69d..1fa1caa198eb2e46be93b8f957c0c3513ed45988 100644 (file)
@@ -483,12 +483,10 @@ have been included in the discussion
 
 14) Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:
 
-If this patch fixes a problem reported by somebody else, consider adding a
-Reported-by: tag to credit the reporter for their contribution.  Please
-note that this tag should not be added without the reporter's permission,
-especially if the problem was not reported in a public forum.  That said,
-if we diligently credit our bug reporters, they will, hopefully, be
-inspired to help us again in the future.
+The Reported-by tag gives credit to people who find bugs and report them and it
+hopefully inspires them to help us again in the future.  Please note that if
+the bug was reported in private, then ask for permission first before using the
+Reported-by tag.
 
 A Tested-by: tag indicates that the patch has been successfully tested (in
 some environment) by the person named.  This tag informs maintainers that
index 2e0617936e8f7f7624d0920be7ff3ae3d6217f68..c24e156a611842017eeae45ce267c821e28174e8 100644 (file)
@@ -289,10 +289,6 @@ lists when they are assembled; they can be downloaded from:
 
        http://www.kernel.org/pub/linux/kernel/next/
 
-Some information about linux-next has been gathered at:
-
-       http://linux.f-seidel.de/linux-next/pmwiki/
-
 Linux-next has become an integral part of the kernel development process;
 all patches merged during a given merge window should really have found
 their way into linux-next some time before the merge window opens.
index 1990ab4b4949663852863aea0f2ece0c6341807d..caef69022e9c2a11d77221f8386a7091ada26b0a 100644 (file)
@@ -22,10 +22,6 @@ Beyond that, a valuable resource for kernel developers is:
 
        http://kernelnewbies.org/
 
-Information about the linux-next tree gathers at:
-
-       http://linux.f-seidel.de/linux-next/pmwiki/
-
 And, of course, one should not forget http://kernel.org/, the definitive
 location for kernel release information.
 
index 0f8487b888221e6344dec76afd6a8ffa44242306..e77e167593db2aa585ca1afbb48e4fbac0287384 100644 (file)
@@ -11,3 +11,5 @@ Optional properties:
   are supported on the device.  Valid value for SMSC LAN91c111 are
   1, 2 or 4.  If it's omitted or invalid, the size would be 2 meaning
   16-bit access only.
+- power-gpios: GPIO to control the PWRDWN pin
+- reset-gpios: GPIO to control the RESET pin
index 955df60a118c5465b9884369db48533ba17af121..d556dcb8816bd9537e2359f9e4e365ec873283d2 100644 (file)
@@ -7,10 +7,20 @@ Required properties:
 
 - clocks : the clock provider of SYS_MCLK
 
+- VDDA-supply : the regulator provider of VDDA
+
+- VDDIO-supply: the regulator provider of VDDIO
+
+Optional properties:
+
+- VDDD-supply : the regulator provider of VDDD
+
 Example:
 
 codec: sgtl5000@0a {
        compatible = "fsl,sgtl5000";
        reg = <0x0a>;
        clocks = <&clks 150>;
+       VDDA-supply = <&reg_3p3v>;
+       VDDIO-supply = <&reg_3p3v>;
 };
index 042a0273b8bab819ac77e22d0274805c428aad52..b7ba01ad1426cfdb61ede7e68860087df4606a86 100644 (file)
@@ -12,6 +12,9 @@ I. For patch submitters
 
        devicetree@vger.kernel.org
 
+  3) The Documentation/ portion of the patch should come in the series before
+     the code implementing the binding.
+
 II. For kernel maintainers
 
   1) If you aren't comfortable reviewing a given binding, reply to it and ask
index 74339c57b914f94caa72bc67dc3b416674f9b76b..db034a5912e76222965206b4857a52bb1f0a0066 100644 (file)
@@ -1307,6 +1307,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        .cdrom .chs .ignore_cable are additional options
                        See Documentation/ide/ide.txt.
 
+       ide-generic.probe-mask= [HW] (E)IDE subsystem
+                       Format: <int>
+                       Probe mask for legacy ISA IDE ports.  Depending on
+                       platform up to 6 ports are supported, enabled by
+                       setting corresponding bits in the mask to 1.  The
+                       default value is 0x0, which has a special meaning.
+                       On systems that have PCI, it triggers scanning the
+                       PCI bus for the first and the second port, which
+                       are then probed.  On systems without PCI the value
+                       of 0x0 enables probing the two first ports as if it
+                       was 0x3.
+
        ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem
                        Claim all unknown PCI IDE storage controllers.
 
@@ -1587,6 +1599,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        kmemleak=       [KNL] Boot-time kmemleak enable/disable
                        Valid arguments: on, off
                        Default: on
+                       Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
+                       the default is off.
 
        kmemcheck=      [X86] Boot-time kmemcheck enable/disable/one-shot mode
                        Valid arguments: 0, 1, 2
index f4f033c8d856e44ac85ba655da2348230acd8c4e..45e777f4e41de846eb485b579d0e934e0a512d97 100644 (file)
@@ -62,6 +62,10 @@ Memory may be allocated or freed before kmemleak is initialised and
 these actions are stored in an early log buffer. The size of this buffer
 is configured via the CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE option.
 
+If CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF are enabled, the kmemleak is
+disabled by default. Passing "kmemleak=on" on the kernel command
+line enables the function. 
+
 Basic Algorithm
 ---------------
 
index 3e3232dcb2b828eb4dd5e73a81daf898f7e82e5f..2948b7b124b92c8412929d53533526c5ef704d13 100644 (file)
@@ -1,5 +1,5 @@
 # List of programs to build
-hostprogs-y := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test disable-tsc-test
+hostprogs-$(CONFIG_X86) := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test disable-tsc-test
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
 
diff --git a/Documentation/ptp/testptp.mk b/Documentation/ptp/testptp.mk
new file mode 100644 (file)
index 0000000..4ef2d97
--- /dev/null
@@ -0,0 +1,33 @@
+# PTP 1588 clock support - User space test program
+#
+# Copyright (C) 2010 OMICRON electronics GmbH
+#
+#  This program is free software; you can redistribute it and/or modify
+#  it under the terms of the GNU General Public License as published by
+#  the Free Software Foundation; either version 2 of the License, or
+#  (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software
+#  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+CC        = $(CROSS_COMPILE)gcc
+INC       = -I$(KBUILD_OUTPUT)/usr/include
+CFLAGS    = -Wall $(INC)
+LDLIBS    = -lrt
+PROGS     = testptp
+
+all: $(PROGS)
+
+testptp: testptp.o
+
+clean:
+       rm -f testptp.o
+
+distclean: clean
+       rm -f $(PROGS)
index 2b99e57207c161db7fa442f16ff99e16414a1a79..ee075c3d21248cff5f409585116c3ab52af6a575 100644 (file)
@@ -10,3 +10,6 @@ always := $(hostprogs-y)
 HOSTCFLAGS := -I$(objtree)/usr/include -std=gnu99
 HOSTCFLAGS_vdso_standalone_test_x86.o := -fno-asynchronous-unwind-tables -fno-stack-protector
 HOSTLOADLIBES_vdso_standalone_test_x86 := -nostdlib
+ifeq ($(CONFIG_X86_32),y)
+HOSTLOADLIBES_vdso_standalone_test_x86 += -lgcc_s
+endif
index d46240265c5001b767c0ba0da8c4397646d6f0c1..93b0ebf8cc38d06327732962a36e5005f948817e 100644 (file)
@@ -63,7 +63,7 @@ static inline void linux_exit(int code)
        x86_syscall3(__NR_exit, code, 0, 0);
 }
 
-void to_base10(char *lastdig, uint64_t n)
+void to_base10(char *lastdig, time_t n)
 {
        while (n) {
                *lastdig = (n % 10) + '0';
index bdd4bb97fff709114f8374082b670e9f32ade7f9..b64e0af9cc56f676d731b7ad3436f15a498ca147 100644 (file)
@@ -274,7 +274,7 @@ This command mounts a (pseudo) filesystem of type hugetlbfs on the directory
 /mnt/huge.  Any files created on /mnt/huge uses huge pages.  The uid and gid
 options sets the owner and group of the root of the file system.  By default
 the uid and gid of the current process are taken.  The mode option sets the
-mode of root of file system to value & 0777.  This value is given in octal.
+mode of root of file system to value & 01777.  This value is given in octal.
 By default the value 0755 is picked. The size option sets the maximum value of
 memory (huge pages) allowed for that filesystem (/mnt/huge). The size is
 rounded down to HPAGE_SIZE.  The option nr_inodes sets the maximum number of
index dab92a78d1d5e5756d474109b3061290afd67550..1cfabdd1d23f6b1311cd00d954e887ef37f03ae7 100644 (file)
@@ -4608,7 +4608,7 @@ S:        Supported
 F:     drivers/crypto/nx/
 
 IBM Power 842 compression accelerator
-M:     Nathan Fontenot <nfont@linux.vnet.ibm.com>
+M:     Dan Streetman <ddstreet@us.ibm.com>
 S:     Supported
 F:     drivers/crypto/nx/nx-842.c
 F:     include/linux/nx842.h
index 739fcf29c6439af97b6a8bca100db0b3fd082e88..bc82a12d4c2c3bb6b294c2c99d4a64d3568537f0 100644 (file)
                bank-width = <2>;
                pinctrl-names = "default";
                pinctrl-0 = <&ethernet_pins>;
+               power-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;     /* gpio86 */
+               reset-gpios = <&gpio6 4 GPIO_ACTIVE_HIGH>;      /* gpio164 */
                gpmc,device-width = <2>;
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
index 713e807621d2cf6a785660d40fec9367866eedcd..2d2d6087b9b105d5dadcd66f9821deefe50d1e66 100644 (file)
@@ -10,6 +10,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/compiler.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
  * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
  *            (http://gcc.gnu.org/PR8896) and incorrect structure
  *           initialisation in fs/jffs2/erase.c
+ * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
+ *           miscompiles find_get_entry(), and can result in EXT3 and EXT4
+ *           filesystem corruption (possibly other FS too).
  */
+#ifdef __GNUC__
 #if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
 #error Your compiler is too buggy; it is known to miscompile kernels.
-#error    Known good compilers: 3.3
+#error    Known good compilers: 3.3, 4.x
+#endif
+#if GCC_VERSION >= 40800 && GCC_VERSION < 40803
+#error Your compiler is too buggy; it is known to miscompile kernels
+#error and result in filesystem corruption and oopses.
+#endif
 #endif
 
 int main(void)
index c95346c948297fcb14f5f6a5dd33d71b7247166a..cec9d6c6442c80b03d3d6954c2c3feb65a62176f 100644 (file)
@@ -252,9 +252,6 @@ static void __init nokia_n900_legacy_init(void)
                platform_device_register(&omap3_rom_rng_device);
 
        }
-
-       /* Only on some development boards */
-       gpio_request_one(164, GPIOF_OUT_INIT_LOW, "smc91x reset");
 }
 
 static void __init omap3_tao3530_legacy_init(void)
index 92bba32d92304c4383d43bee8ef95f7d988602c6..9481f85c56e6fd0ed263401a3e71d0cfa541606c 100644 (file)
@@ -559,10 +559,10 @@ void __init mem_init(void)
 #ifdef CONFIG_MODULES
                        "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
 #endif
-                       "      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
-                       "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
-                       "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
-                       "       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",
+                       "      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
+                       "      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
+                       "      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
+                       "       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",
 
                        MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
                                (PAGE_SIZE)),
index 6feded3b0c4c1eb6d1414082a9f9318041129a39..a7736fa0580cc2e000cf1613d7b21dbe7aba4dcc 100644 (file)
@@ -129,6 +129,10 @@ endmenu
 
 menu "Kernel features"
 
+config NR_CPUS
+       int
+       default "1"
+
 config ADVANCED_OPTIONS
        bool "Prompt for advanced kernel configuration options"
        help
index ea4b233647c1aa32d23562e7abba7dab72d404db..0a53362d55486187ea3aada0ff999b04de97d7ec 100644 (file)
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         387
+#define __NR_syscalls         388
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
index 1c2380bf8fe60850274dce0bdbc9e9bb16cc531f..c712677f8a2a6d7460e245d8e703adaa53372070 100644 (file)
 #define __NR_seccomp           384
 #define __NR_getrandom         385
 #define __NR_memfd_create      386
+#define __NR_bpf               387
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
index de59ee1d7010a788cf916b4ade098375af6214d6..0166e890486c72d9b9263b6d46553dc8b50d6b4a 100644 (file)
@@ -387,3 +387,4 @@ ENTRY(sys_call_table)
        .long sys_seccomp
        .long sys_getrandom             /* 385 */
        .long sys_memfd_create
+       .long sys_bpf
index 9037914f6985df1dfd8227bbc6f7e943aa15a3ea..b30e41c0c0335cf2ab79e716c9c41c0ebced18e8 100644 (file)
@@ -660,8 +660,13 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                        res = &hose->mem_resources[memno++];
                        break;
                }
-               if (res != NULL)
-                       of_pci_range_to_resource(&range, dev, res);
+               if (res != NULL) {
+                       res->name = dev->full_name;
+                       res->flags = range.flags;
+                       res->start = range.cpu_addr;
+                       res->end = range.cpu_addr + range.size - 1;
+                       res->parent = res->child = res->sibling = NULL;
+               }
        }
 
        /* If there's an ISA hole and the pci_mem_offset is -not- matching
index 623f2971ce0ed8d4d947642c862d4dce7640d28d..766b77d527ac6e1c7286c4d1618229b5f2dac72e 100644 (file)
@@ -71,7 +71,7 @@ pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
 
 void flush_dcache_icache_hugepage(struct page *page);
 
-#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
+#if defined(CONFIG_PPC_MM_SLICES)
 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len);
 #else
index 7d8a600688058ec5025cfd3688b6eb369cdadea5..ce9577d693be1c7849c18ac0259a35525d46c6fe 100644 (file)
@@ -365,3 +365,4 @@ SYSCALL_SPU(renameat2)
 SYSCALL_SPU(seccomp)
 SYSCALL_SPU(getrandom)
 SYSCALL_SPU(memfd_create)
+SYSCALL_SPU(bpf)
index 4e9af3fd43e7d062755255d932c2123ea5f54c8a..e0da021caa004205fc8b645d95b3047c8cd9b73e 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          361
+#define __NR_syscalls          362
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index 0688fc06e18394268002ae2b7e419f183e6a7dc4..f55351f2e66e962097bc078c25f77a176ca52e2a 100644 (file)
 #define __NR_seccomp           358
 #define __NR_getrandom         359
 #define __NR_memfd_create      360
+#define __NR_bpf               361
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index 0f9939e693df6b8422a7b87301f7dcd250816c34..5a236f082c78386a47b9b415f98f619e8e615688 100644 (file)
@@ -99,8 +99,6 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
        u64 vsid;
        int psize, ssize;
 
-       slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
-
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
@@ -133,6 +131,7 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
        vsid |= mmu_psize_defs[psize].sllp |
                ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
 
+       slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
        slb->vsid = vsid;
 
        return 0;
index e5236c24dc0766112283618da9a259cf88a61735..b9d1dfdbe5bbaa5bdaad7446ec945aa909f2befd 100644 (file)
@@ -1509,11 +1509,14 @@ static int update_cpu_topology(void *data)
        cpu = smp_processor_id();
 
        for (update = data; update; update = update->next) {
+               int new_nid = update->new_nid;
                if (cpu != update->cpu)
                        continue;
 
-               unmap_cpu_from_node(update->cpu);
-               map_cpu_to_node(update->cpu, update->new_nid);
+               unmap_cpu_from_node(cpu);
+               map_cpu_to_node(cpu, new_nid);
+               set_cpu_numa_node(cpu, new_nid);
+               set_cpu_numa_mem(cpu, local_memory_node(new_nid));
                vdso_getcpu_init();
        }
 
index 8d7bda94d1969b2325073315875a05545922b0fc..ded0ea1afde4021578d21d2e969318ea6cd046eb 100644 (file)
@@ -682,6 +682,7 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
        slice_convert(mm, mask, psize);
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
 /*
  * is_hugepage_only_range() is used by generic code to verify whether
  * a normal mmap mapping (non hugetlbfs) is valid on a given area.
@@ -726,4 +727,4 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 #endif
        return !slice_check_fit(mask, available);
 }
-
+#endif
index 6c8710dd90c9b12d7e746113b43fdc9231c8b8b3..dba34088da2896187a390ee720af2a4a65bc3216 100644 (file)
@@ -417,11 +417,6 @@ static int h_24x7_event_add(struct perf_event *event, int flags)
        return 0;
 }
 
-static int h_24x7_event_idx(struct perf_event *event)
-{
-       return 0;
-}
-
 static struct pmu h_24x7_pmu = {
        .task_ctx_nr = perf_invalid_context,
 
@@ -433,7 +428,6 @@ static struct pmu h_24x7_pmu = {
        .start       = h_24x7_event_start,
        .stop        = h_24x7_event_stop,
        .read        = h_24x7_event_update,
-       .event_idx   = h_24x7_event_idx,
 };
 
 static int hv_24x7_init(void)
index 15fc76c930227e7b2cff67cfc00889de1e4d6c97..a051fe946c63a2eecc88b041e0ce67f14f329b4b 100644 (file)
@@ -246,11 +246,6 @@ static int h_gpci_event_init(struct perf_event *event)
        return 0;
 }
 
-static int h_gpci_event_idx(struct perf_event *event)
-{
-       return 0;
-}
-
 static struct pmu h_gpci_pmu = {
        .task_ctx_nr = perf_invalid_context,
 
@@ -262,7 +257,6 @@ static struct pmu h_gpci_pmu = {
        .start       = h_gpci_event_start,
        .stop        = h_gpci_event_stop,
        .read        = h_gpci_event_update,
-       .event_idx   = h_gpci_event_idx,
 };
 
 static int hv_gpci_init(void)
index dd2c285ad1708d755b5a1950558566149e837839..ad4b31df779a389ceb83b1c62d77a850e9c1b749 100644 (file)
@@ -191,7 +191,6 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
 {
        struct lpc_debugfs_entry *lpc = filp->private_data;
        u32 data, pos, len, todo;
-       __be32 bedata;
        int rc;
 
        if (!access_ok(VERIFY_WRITE, ubuf, count))
@@ -214,10 +213,9 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
                                len = 2;
                }
                rc = opal_lpc_read(opal_lpc_chip_id, lpc->lpc_type, pos,
-                                  &bedata, len);
+                                  &data, len);
                if (rc)
                        return -ENXIO;
-               data = be32_to_cpu(bedata);
                switch(len) {
                case 4:
                        rc = __put_user((u32)data, (u32 __user *)ubuf);
index e9e2450c1fddd3bb6988084fb38039a8b3c3300b..feb549aa3eea98ee5369f6c3ecf5087dc871b4d8 100644 (file)
@@ -58,7 +58,7 @@ END_FTR_SECTION(0, 1);                                                \
  */
 
 #define OPAL_CALL(name, token)         \
- _GLOBAL(name);                                \
+ _GLOBAL_TOC(name);                    \
        mflr    r0;                     \
        std     r0,16(r1);              \
        li      r0,token;               \
index 08e761318c17d786d2e40ce85fcd0b631fecc731..b878f12a9597201476ffda4aabaf676f66735b99 100644 (file)
@@ -1411,11 +1411,6 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags)
        perf_pmu_enable(event->pmu);
 }
 
-static int cpumsf_pmu_event_idx(struct perf_event *event)
-{
-       return event->hw.idx;
-}
-
 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
 
@@ -1458,7 +1453,6 @@ static struct pmu cpumf_sampling = {
        .stop         = cpumsf_pmu_stop,
        .read         = cpumsf_pmu_read,
 
-       .event_idx    = cpumsf_pmu_event_idx,
        .attr_groups  = cpumsf_pmu_attr_groups,
 };
 
index 9139d14b9c53460587b4be67de041e44e49893fe..538c10db35379dfd3b4c97fb137453609b0ded7c 100644 (file)
@@ -118,7 +118,7 @@ static struct plat_sci_port scif0_platform_data = {
 };
 
 static struct resource scif0_resources[] = {
-       DEFINE_RES_MEM(0xfffffe80, 0x100),
+       DEFINE_RES_MEM(0xfffffe80, 0x10),
        DEFINE_RES_IRQ(evt2irq(0x4e0)),
 };
 
@@ -143,7 +143,7 @@ static struct plat_sci_port scif1_platform_data = {
 };
 
 static struct resource scif1_resources[] = {
-       DEFINE_RES_MEM(0xa4000150, 0x100),
+       DEFINE_RES_MEM(0xa4000150, 0x10),
        DEFINE_RES_IRQ(evt2irq(0x900)),
 };
 
@@ -169,7 +169,7 @@ static struct plat_sci_port scif2_platform_data = {
 };
 
 static struct resource scif2_resources[] = {
-       DEFINE_RES_MEM(0xa4000140, 0x100),
+       DEFINE_RES_MEM(0xa4000140, 0x10),
        DEFINE_RES_IRQ(evt2irq(0x880)),
 };
 
index c842a89b11903aaa56d89ff65fd3b87658046a68..46d83842eddc6ceae0496b6397a0a04e77ce0e10 100644 (file)
 #define __NR_seccomp           346
 #define __NR_getrandom         347
 #define __NR_memfd_create      348
+#define __NR_bpf               349
 
-#define NR_syscalls            349
+#define NR_syscalls            350
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
index 6a873c344bc0b970b4f0da915f4e5042763b82b9..ad0cdf497b785de84fe3f8f5e1e092a4f2de7429 100644 (file)
@@ -86,4 +86,4 @@ sys_call_table:
 /*330*/        .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/        .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
-/*345*/        .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create
+/*345*/        .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
index d9151b6490d8810b2bbd2b0f3bee53e535711375..580cde9370c9317a2cc8b6522579d5c38fa48ee3 100644 (file)
@@ -87,7 +87,7 @@ sys_call_table32:
 /*330*/        .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
        .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
-       .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create
+       .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 
 #endif /* CONFIG_COMPAT */
 
@@ -166,4 +166,4 @@ sys_call_table:
 /*330*/        .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
        .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
-       .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create
+       .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
index f2327e88e07cc64236636a9f5973af898be947f1..ded8a6774ac99a6b9fd6708e440f66bc2f6e7433 100644 (file)
@@ -142,6 +142,10 @@ config INSTRUCTION_DECODER
        def_bool y
        depends on KPROBES || PERF_EVENTS || UPROBES
 
+config PERF_EVENTS_INTEL_UNCORE
+       def_bool y
+       depends on PERF_EVENTS && SUP_SUP_INTEL && PCI
+
 config OUTPUT_FORMAT
        string
        default "elf32-i386" if X86_32
index 8ffba18395c8bb0a93c6a9dc65cfd8739a5937f3..ffe71228fc10c3080795c576b1ffae6b62160d7f 100644 (file)
@@ -157,7 +157,7 @@ ENTRY(ia32_sysenter_target)
         * ourselves.  To save a few cycles, we can check whether
         * NT was set instead of doing an unconditional popfq.
         */
-       testl $X86_EFLAGS_NT,EFLAGS(%rsp)       /* saved EFLAGS match cpu */
+       testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp)
        jnz sysenter_fix_flags
 sysenter_flags_fixed:
 
index 7024c12f7bfe9cd15252f29f3502db75d92ebccc..400873450e33ab86d734471e2f07fd0ed775dfa1 100644 (file)
@@ -105,6 +105,7 @@ static __always_inline bool should_resched(void)
 # ifdef CONFIG_CONTEXT_TRACKING
     extern asmlinkage void ___preempt_schedule_context(void);
 #   define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
+    extern asmlinkage void preempt_schedule_context(void);
 # endif
 #endif
 
index b436fc735aa455be7f007a7bcb089741ace98c1c..a142e77693e179334987d3502e33c5444fde885c 100644 (file)
@@ -397,7 +397,7 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
 
        /* Don't set up the ACPI SCI because it's already set up */
        if (acpi_gbl_FADT.sci_interrupt == gsi)
-               return gsi;
+               return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC);
 
        trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
        polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
@@ -604,14 +604,18 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
 {
-       int irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
+       int irq;
 
-       if (irq >= 0) {
+       if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
+               *irqp = gsi;
+       } else {
+               irq = mp_map_gsi_to_irq(gsi,
+                                       IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
+               if (irq < 0)
+                       return -1;
                *irqp = irq;
-               return 0;
        }
-
-       return -1;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
 
index 5972b108f15a739a06b47276a05d86713d08a3a2..b708738d016e09f44e15040333741382caaffd34 100644 (file)
@@ -185,8 +185,6 @@ static void apbt_setup_irq(struct apbt_dev *adev)
 
        irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
        irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
-       /* APB timer irqs are set up as mp_irqs, timer is edge type */
-       __irq_set_handler(adev->irq, handle_edge_irq, 0, "edge");
 }
 
 /* Should be called with per cpu */
index 00853b254ab09c36afdb2a681bd9c68037ac17dc..ba6cc041edb12e23a783fc0c5aaaed788af39dc0 100644 (file)
@@ -1297,7 +1297,7 @@ void setup_local_APIC(void)
        unsigned int value, queued;
        int i, j, acked = 0;
        unsigned long long tsc = 0, ntsc;
-       long long max_loops = cpu_khz;
+       long long max_loops = cpu_khz ? cpu_khz : 1000000;
 
        if (cpu_has_tsc)
                rdtscll(tsc);
@@ -1383,7 +1383,7 @@ void setup_local_APIC(void)
                        break;
                }
                if (queued) {
-                       if (cpu_has_tsc) {
+                       if (cpu_has_tsc && cpu_khz) {
                                rdtscll(ntsc);
                                max_loops = (cpu_khz << 10) - (ntsc - tsc);
                        } else
index 01d5453b5502c235759712b751da809123b565cc..e27b49d7c922a3caaa6c45446e8bc383a86d27bc 100644 (file)
@@ -39,9 +39,12 @@ obj-$(CONFIG_CPU_SUP_AMD)            += perf_event_amd_iommu.o
 endif
 obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_p6.o perf_event_knc.o perf_event_p4.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
-obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_uncore.o perf_event_intel_uncore_snb.o
-obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_uncore_snbep.o perf_event_intel_uncore_nhmex.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_rapl.o
+
+obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \
+                                          perf_event_intel_uncore_snb.o \
+                                          perf_event_intel_uncore_snbep.o \
+                                          perf_event_intel_uncore_nhmex.o
 endif
 
 
index 1ef456273172c8875e5673d890916dd70033e8e4..9cc6b6f25f424d18426adbab3e31e251e78c13af 100644 (file)
@@ -213,12 +213,13 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_F00F_BUG
        /*
-        * All current models of Pentium and Pentium with MMX technology CPUs
+        * All models of Pentium and Pentium with MMX technology CPUs
         * have the F0 0F bug, which lets nonprivileged users lock up the
         * system. Announce that the fault handler will be checking for it.
+        * The Quark is also family 5, but does not have the same bug.
         */
        clear_cpu_bug(c, X86_BUG_F00F);
-       if (!paravirt_enabled() && c->x86 == 5) {
+       if (!paravirt_enabled() && c->x86 == 5 && c->x86_model < 9) {
                static int f00f_workaround_enabled;
 
                set_cpu_bug(c, X86_BUG_F00F);
index 1b8299dd3d919de522374301b6b33777412e7e7b..143e5f5dc8551b568b7eabb84d83da764268d6b2 100644 (file)
@@ -243,8 +243,9 @@ static bool check_hw_exists(void)
 
 msr_fail:
        printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
-       printk(boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR
-              "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new);
+       printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+               boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
+               reg, val_new);
 
        return false;
 }
@@ -444,12 +445,6 @@ int x86_pmu_hw_config(struct perf_event *event)
        if (event->attr.type == PERF_TYPE_RAW)
                event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
 
-       if (event->attr.sample_period && x86_pmu.limit_period) {
-               if (x86_pmu.limit_period(event, event->attr.sample_period) >
-                               event->attr.sample_period)
-                       return -EINVAL;
-       }
-
        return x86_setup_perfctr(event);
 }
 
@@ -987,9 +982,6 @@ int x86_perf_event_set_period(struct perf_event *event)
        if (left > x86_pmu.max_period)
                left = x86_pmu.max_period;
 
-       if (x86_pmu.limit_period)
-               left = x86_pmu.limit_period(event, left);
-
        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
        /*
index d98a34d435d7b5260154bcf4463bbcb24ad073fb..fc5eb390b3685d62b58351f1a281e94c22cc8645 100644 (file)
@@ -445,7 +445,6 @@ struct x86_pmu {
        struct x86_pmu_quirk *quirks;
        int             perfctr_second_write;
        bool            late_ack;
-       unsigned        (*limit_period)(struct perf_event *event, unsigned l);
 
        /*
         * sysfs attrs
index a73947c53b65aa683f29d61a115d0e6d19a9ec5b..944bf019b74f425e06cc465358d25b85741a5b47 100644 (file)
@@ -220,15 +220,6 @@ static struct event_constraint intel_hsw_event_constraints[] = {
        EVENT_CONSTRAINT_END
 };
 
-static struct event_constraint intel_bdw_event_constraints[] = {
-       FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
-       FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
-       FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
-       INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
-       INTEL_EVENT_CONSTRAINT(0xa3, 0x4),      /* CYCLE_ACTIVITY.* */
-       EVENT_CONSTRAINT_END
-};
-
 static u64 intel_pmu_event_map(int hw_event)
 {
        return intel_perfmon_event_map[hw_event];
@@ -424,126 +415,6 @@ static __initconst const u64 snb_hw_cache_event_ids
 
 };
 
-static __initconst const u64 hsw_hw_cache_event_ids
-                               [PERF_COUNT_HW_CACHE_MAX]
-                               [PERF_COUNT_HW_CACHE_OP_MAX]
-                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
-               [ C(RESULT_MISS)   ] = 0x151,   /* L1D.REPLACEMENT */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
-               [ C(RESULT_MISS)   ] = 0x0,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0,
-               [ C(RESULT_MISS)   ] = 0x0,
-       },
- },
- [ C(L1I ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0,
-               [ C(RESULT_MISS)   ] = 0x280,   /* ICACHE.MISSES */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0,
-               [ C(RESULT_MISS)   ] = 0x0,
-       },
- },
- [ C(LL  ) ] = {
-       [ C(OP_READ) ] = {
-               /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
-               [ C(RESULT_ACCESS) ] = 0x1b7,
-               /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
-                   L3_MISS|ANY_SNOOP */
-               [ C(RESULT_MISS)   ] = 0x1b7,
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE:ALL_RFO */
-               /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
-               [ C(RESULT_MISS)   ] = 0x1b7,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0,
-               [ C(RESULT_MISS)   ] = 0x0,
-       },
- },
- [ C(DTLB) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
-               [ C(RESULT_MISS)   ] = 0x108,   /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
-               [ C(RESULT_MISS)   ] = 0x149,   /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0,
-               [ C(RESULT_MISS)   ] = 0x0,
-       },
- },
- [ C(ITLB) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x6085,  /* ITLB_MISSES.STLB_HIT */
-               [ C(RESULT_MISS)   ] = 0x185,   /* ITLB_MISSES.MISS_CAUSES_A_WALK */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
- },
- [ C(BPU ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0xc4,    /* BR_INST_RETIRED.ALL_BRANCHES */
-               [ C(RESULT_MISS)   ] = 0xc5,    /* BR_MISP_RETIRED.ALL_BRANCHES */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
- },
-};
-
-static __initconst const u64 hsw_hw_cache_extra_regs
-                               [PERF_COUNT_HW_CACHE_MAX]
-                               [PERF_COUNT_HW_CACHE_OP_MAX]
-                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(LL  ) ] = {
-       [ C(OP_READ) ] = {
-               /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
-               [ C(RESULT_ACCESS) ] = 0x2d5,
-               /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
-                   L3_MISS|ANY_SNOOP */
-               [ C(RESULT_MISS)   ] = 0x3fbc0202d5ull,
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x122,   /* OFFCORE_RESPONSE:ALL_RFO */
-               /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
-               [ C(RESULT_MISS)   ] = 0x3fbc020122ull,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0,
-               [ C(RESULT_MISS)   ] = 0x0,
-       },
- },
-};
-
 static __initconst const u64 westmere_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
@@ -2034,24 +1905,6 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
        return c;
 }
 
-/*
- * Broadwell:
- * The INST_RETIRED.ALL period always needs to have lowest
- * 6bits cleared (BDM57). It shall not use a period smaller
- * than 100 (BDM11). We combine the two to enforce
- * a min-period of 128.
- */
-static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
-{
-       if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
-                       X86_CONFIG(.event=0xc0, .umask=0x01)) {
-               if (left < 128)
-                       left = 128;
-               left &= ~0x3fu;
-       }
-       return left;
-}
-
 PMU_FORMAT_ATTR(event, "config:0-7"    );
 PMU_FORMAT_ATTR(umask, "config:8-15"   );
 PMU_FORMAT_ATTR(edge,  "config:18"     );
@@ -2692,8 +2545,8 @@ __init int intel_pmu_init(void)
        case 69: /* 22nm Haswell ULT */
        case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
                x86_pmu.late_ack = true;
-               memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
-               memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+               memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+               memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
 
                intel_pmu_lbr_init_snb();
 
@@ -2712,28 +2565,6 @@ __init int intel_pmu_init(void)
                pr_cont("Haswell events, ");
                break;
 
-       case 61: /* 14nm Broadwell Core-M */
-               x86_pmu.late_ack = true;
-               memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
-               memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
-
-               intel_pmu_lbr_init_snb();
-
-               x86_pmu.event_constraints = intel_bdw_event_constraints;
-               x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
-               x86_pmu.extra_regs = intel_snbep_extra_regs;
-               x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-               /* all extra regs are per-cpu when HT is on */
-               x86_pmu.er_flags |= ERF_HAS_RSP_1;
-               x86_pmu.er_flags |= ERF_NO_HT_SHARING;
-
-               x86_pmu.hw_config = hsw_hw_config;
-               x86_pmu.get_event_constraints = hsw_get_event_constraints;
-               x86_pmu.cpu_events = hsw_events_attrs;
-               x86_pmu.limit_period = bdw_limit_period;
-               pr_cont("Broadwell events, ");
-               break;
-
        default:
                switch (x86_pmu.version) {
                case 1:
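
The hunks above revert the prematurely merged Broadwell PMU support, including the bdw_limit_period() helper that enforced errata BDM11/BDM57 for INST_RETIRED.ALL (event 0xc0, umask 0x01). As a rough standalone illustration of the clamping rule that helper applied (floor the sample period at 128, then clear the low six bits), with made-up names and none of the perf infrastructure:

    #include <stdio.h>

    /* Sketch of the period clamp: minimum 128, low 6 bits cleared. */
    static unsigned int clamp_period(unsigned int left)
    {
            if (left < 128)
                    left = 128;
            return left & ~0x3fu;
    }

    int main(void)
    {
            /* 1 -> 128, 130 -> 128, 4096 -> 4096 */
            printf("%u %u %u\n", clamp_period(1), clamp_period(130),
                   clamp_period(4096));
            return 0;
    }
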
index b553ed89e5f5e97346ac460934ce6e016b45455f..344b63f18d14b7d5ae1b55a85a8e28fa7a63642d 100644 (file)
@@ -447,15 +447,14 @@ sysenter_exit:
 sysenter_audit:
        testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
-       addl $4,%esp
-       CFI_ADJUST_CFA_OFFSET -4
-       movl %esi,4(%esp)               /* 5th arg: 4th syscall arg */
-       movl %edx,(%esp)                /* 4th arg: 3rd syscall arg */
-       /* %ecx already in %ecx            3rd arg: 2nd syscall arg */
-       movl %ebx,%edx                  /* 2nd arg: 1st syscall arg */
-       /* %eax already in %eax            1st arg: syscall number */
+       /* movl PT_EAX(%esp), %eax      already set, syscall number: 1st arg to audit */
+       movl PT_EBX(%esp), %edx         /* ebx/a0: 2nd arg to audit */
+       /* movl PT_ECX(%esp), %ecx      already set, a1: 3nd arg to audit */
+       pushl_cfi PT_ESI(%esp)          /* a3: 5th arg */
+       pushl_cfi PT_EDX+4(%esp)        /* a2: 4th arg */
        call __audit_syscall_entry
-       pushl_cfi %ebx
+       popl_cfi %ecx /* get that remapped edx off the stack */
+       popl_cfi %ecx /* get that remapped esi off the stack */
        movl PT_EAX(%esp),%eax          /* reload syscall number */
        jmp sysenter_do_call
 
index 8af817105e29cbc25ffaa8b2430a57c5de782dd6..e7cc5370cd2fcade87dc1cecae2ab85184f62d27 100644 (file)
@@ -111,8 +111,7 @@ static void make_8259A_irq(unsigned int irq)
 {
        disable_irq_nosync(irq);
        io_apic_irqs &= ~(1<<irq);
-       irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
-                                     i8259A_chip.name);
+       irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
        enable_irq(irq);
 }
 
index 44f1ed42fdf2136c0b2654256745d20edca1a28d..4de73ee78361311493163414dc7439b7bbea75e5 100644 (file)
@@ -70,7 +70,6 @@ int vector_used_by_percpu_irq(unsigned int vector)
 void __init init_ISA_irqs(void)
 {
        struct irq_chip *chip = legacy_pic->chip;
-       const char *name = chip->name;
        int i;
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
@@ -79,7 +78,7 @@ void __init init_ISA_irqs(void)
        legacy_pic->init(0);
 
        for (i = 0; i < nr_legacy_irqs(); i++)
-               irq_set_chip_and_handler_name(i, chip, handle_level_irq, name);
+               irq_set_chip_and_handler(i, chip, handle_level_irq);
 }
 
 void __init init_IRQ(void)
index 235cfd39e0d793c23299da10a9eb45f7b29006a4..ab08aa2276fb803a83c5cbf369b6dc69d605a0f8 100644 (file)
@@ -1128,7 +1128,6 @@ void __init setup_arch(char **cmdline_p)
        setup_real_mode();
 
        memblock_set_current_limit(get_max_mapped());
-       dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
 
        /*
         * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
@@ -1159,6 +1158,7 @@ void __init setup_arch(char **cmdline_p)
        early_acpi_boot_init();
 
        initmem_init();
+       dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
 
        /*
         * Reserve memory for crash kernel after SRAT is parsed so that it
index 2d5200e56357d49f3466ce55e8ab2b3273306f68..4d2128ac70bdab90afe6a753f090996f30f72ff1 100644 (file)
@@ -102,8 +102,6 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-static DEFINE_PER_CPU(struct completion, die_complete);
-
 atomic_t init_deasserted;
 
 /*
@@ -1318,6 +1316,8 @@ void cpu_disable_common(void)
        fixup_irqs();
 }
 
+static DEFINE_PER_CPU(struct completion, die_complete);
+
 int native_cpu_disable(void)
 {
        int ret;
index b6025f9e36c65d7e83ddf05a7da99922a703a31e..b7e50bba3bbbb98066fac741d826e5f6f4d7e946 100644 (file)
@@ -1166,14 +1166,17 @@ void __init tsc_init(void)
 
        x86_init.timers.tsc_pre_init();
 
-       if (!cpu_has_tsc)
+       if (!cpu_has_tsc) {
+               setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
                return;
+       }
 
        tsc_khz = x86_platform.calibrate_tsc();
        cpu_khz = tsc_khz;
 
        if (!tsc_khz) {
                mark_tsc_unstable("could not calculate TSC khz");
+               setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
                return;
        }
 
index ae242a7c11c7473cfeb163b78d54fa62005b8e44..36de293caf25c3bfe3fcd062ef95f583d5eb398e 100644 (file)
@@ -409,7 +409,7 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
        psize = page_level_size(level);
        pmask = page_level_mask(level);
        offset = virt_addr & ~pmask;
-       phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+       phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
        return (phys_addr | offset);
 }
 EXPORT_SYMBOL_GPL(slow_virt_to_phys);
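
The cast added above guards against a 32-bit truncation: with PAE, pte_pfn() can name a page at or above the 4 GiB boundary, and shifting it by PAGE_SHIFT inside an unsigned long wraps before the value ever reaches phys_addr_t. A minimal user-space sketch of the same overflow, assuming a 32-bit unsigned long and the usual 4 KiB PAGE_SHIFT:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            uint32_t pfn = 0x100000;                        /* first page at 4 GiB */
            uint32_t truncated = pfn << PAGE_SHIFT;         /* wraps to 0 in 32 bits */
            uint64_t widened = (uint64_t)pfn << PAGE_SHIFT; /* 0x100000000 as intended */

            printf("truncated=%#x widened=%#llx\n",
                   truncated, (unsigned long long)widened);
            return 0;
    }
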
index 3c53a90fdb18b8d446bcb6ef3cc711ab543974d8..c14ad34776c466f3cb18fe4b0a50fe40db8cd3ec 100644 (file)
@@ -106,6 +106,7 @@ int __init sfi_parse_mtmr(struct sfi_table_header *table)
                        mp_irq.dstapic = MP_APIC_ALL;
                        mp_irq.dstirq = pentry->irq;
                        mp_save_irq(&mp_irq);
+                       mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
        }
 
        return 0;
@@ -176,6 +177,7 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
                mp_irq.dstapic = MP_APIC_ALL;
                mp_irq.dstirq = pentry->irq;
                mp_save_irq(&mp_irq);
+               mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
        }
        return 0;
 }
index ba99351c0f58898cb83bd336299c773230e9e73f..b3ac40aef46b317c5a432a92ba9b40c8e1942504 100644 (file)
@@ -99,16 +99,17 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
        bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
                        &q->queue_flags);
+       bool merge_not_need = bio->bi_vcnt < queue_max_segments(q);
 
        if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) &&
-                       bio->bi_vcnt < queue_max_segments(q))
+                       merge_not_need)
                bio->bi_phys_segments = bio->bi_vcnt;
        else {
                struct bio *nxt = bio->bi_next;
 
                bio->bi_next = NULL;
                bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio,
-                               no_sg_merge);
+                               no_sg_merge && merge_not_need);
                bio->bi_next = nxt;
        }
 
index 24c28b659bb34f00eae0e4b900266e1dca069169..afa3b037a17c3dfcfb466699544dbaf0fb05221e 100644 (file)
@@ -229,7 +229,9 @@ int elevator_init(struct request_queue *q, char *name)
        }
 
        err = e->ops.elevator_init_fn(q, e);
-       return 0;
+       if (err)
+               elevator_put(e);
+       return err;
 }
 EXPORT_SYMBOL(elevator_init);
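
The error path added above makes elevator_init() drop the reference taken on the chosen elevator type when its init callback fails, instead of reporting success. A compact standalone sketch of that get/put-on-error pattern, using hypothetical names rather than the elevator API:

    #include <stdio.h>

    struct obj { int refs; };

    static void obj_get(struct obj *o) { o->refs++; }
    static void obj_put(struct obj *o) { o->refs--; }

    static int obj_init(struct obj *o, int fail)
    {
            (void)o;
            return fail ? -1 : 0;
    }

    /* If init fails after a reference was taken, release it and
     * propagate the error instead of returning success. */
    static int setup(struct obj *o, int fail)
    {
            int err;

            obj_get(o);
            err = obj_init(o, fail);
            if (err)
                    obj_put(o);
            return err;
    }

    int main(void)
    {
            struct obj o = { 0 };
            int err = setup(&o, 1);

            printf("err=%d refs=%d\n", err, o.refs);  /* err=-1 refs=0 */
            return 0;
    }
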
 
index abb2e65b24ccb4cbd64e0194d5bdaf9751e2b145..1e053d911240b577df324912bb68af13e6174bcc 100644 (file)
@@ -508,7 +508,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 
        if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
                err = DRIVER_ERROR << 24;
-               goto out;
+               goto error;
        }
 
        memset(sense, 0, sizeof(sense));
@@ -517,7 +517,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 
        blk_execute_rq(q, disk, rq, 0);
 
-out:
        err = rq->errors & 0xff;        /* only 8 bit SCSI status */
        if (err) {
                if (rq->sense_len && rq->sense) {
index 473ff48924015f07daee1c81ebe3d6436727493e..950fff9ce45397024ac5751b452cdd96a6da9907 100644 (file)
@@ -223,9 +223,10 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 #undef pr_fmt
 #define pr_fmt(fmt) fmt
 
-static void rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
+static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
 {
        dev_set_cma_area(dev, rmem->priv);
+       return 0;
 }
 
 static void rmem_cma_device_release(struct reserved_mem *rmem,
index 1e5ac0a796968b61e3de79a4ae2a82f9c2339916..cd9161a8b3a196bcc67634dc86bb84ae5b6190d1 100644 (file)
@@ -275,7 +275,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
 static const struct pci_device_id bcma_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
-       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },  /* 0xa8d8 */
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
@@ -285,7 +285,8 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
-       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },  /* 0xA8DB */
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },  /* 0xa8db, BCM43217 (sic!) */
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) },  /* 0xa8dc */
        { 0, },
 };
 MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);
index d1656c2f70afa43198066681f6a02ded26eab80b..1000955ce09d870db23979e3950d75e32b75cfc1 100644 (file)
@@ -132,7 +132,7 @@ static bool bcma_is_core_needed_early(u16 core_id)
        return false;
 }
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
 static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
                                                     struct bcma_device *core)
 {
index 2671a3f02f0cfb71831b049c52da9c0f46b20848..8001e812018bbbc0c4360dacd6dc71e8029ab068 100644 (file)
@@ -450,14 +450,10 @@ static int init_driver_queues(struct nullb *nullb)
 
                ret = setup_commands(nq);
                if (ret)
-                       goto err_queue;
+                       return ret;
                nullb->nr_queues++;
        }
-
        return 0;
-err_queue:
-       cleanup_queues(nullb);
-       return ret;
 }
 
 static int null_add_dev(void)
@@ -507,7 +503,9 @@ static int null_add_dev(void)
                        goto out_cleanup_queues;
                }
                blk_queue_make_request(nullb->q, null_queue_bio);
-               init_driver_queues(nullb);
+               rv = init_driver_queues(nullb);
+               if (rv)
+                       goto out_cleanup_blk_queue;
        } else {
                nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
                if (!nullb->q) {
@@ -516,7 +514,9 @@ static int null_add_dev(void)
                }
                blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
                blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
-               init_driver_queues(nullb);
+               rv = init_driver_queues(nullb);
+               if (rv)
+                       goto out_cleanup_blk_queue;
        }
 
        nullb->q->queuedata = nullb;
index 756b8ec00f16d73851bca0c8d5ec754752c58710..0ebadf93b6c5610cb0d7e46edcbdfa040719c199 100644 (file)
@@ -69,8 +69,6 @@ struct vdc_port {
        u8                      vdisk_mtype;
 
        char                    disk_name[32];
-
-       struct vio_disk_vtoc    label;
 };
 
 static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
@@ -710,13 +708,6 @@ static int probe_disk(struct vdc_port *port)
        if (comp.err)
                return comp.err;
 
-       err = generic_request(port, VD_OP_GET_VTOC,
-                             &port->label, sizeof(port->label));
-       if (err < 0) {
-               printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
-               return err;
-       }
-
        if (vdc_version_supported(port, 1, 1)) {
                /* vdisk_size should be set during the handshake, if it wasn't
                 * then the underlying disk is reserved by another system
index 0e63e8aa8279a7993344f15d0d2f48b2536b497a..2ad0b5bce44be89494d9cb5f75da19b9e459cd10 100644 (file)
@@ -99,11 +99,12 @@ static ssize_t mem_used_total_show(struct device *dev,
 {
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);
-       struct zram_meta *meta = zram->meta;
 
        down_read(&zram->init_lock);
-       if (init_done(zram))
+       if (init_done(zram)) {
+               struct zram_meta *meta = zram->meta;
                val = zs_get_total_pages(meta->mem_pool);
+       }
        up_read(&zram->init_lock);
 
        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
@@ -173,16 +174,17 @@ static ssize_t mem_used_max_store(struct device *dev,
        int err;
        unsigned long val;
        struct zram *zram = dev_to_zram(dev);
-       struct zram_meta *meta = zram->meta;
 
        err = kstrtoul(buf, 10, &val);
        if (err || val != 0)
                return -EINVAL;
 
        down_read(&zram->init_lock);
-       if (init_done(zram))
+       if (init_done(zram)) {
+               struct zram_meta *meta = zram->meta;
                atomic_long_set(&zram->stats.max_used_pages,
                                zs_get_total_pages(meta->mem_pool));
+       }
        up_read(&zram->init_lock);
 
        return len;
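
Both hunks above delay reading zram->meta until init_done() has been checked under init_lock, so the sysfs reads cannot dereference an uninitialized device. A simplified sketch of that check-before-dereference shape, with hypothetical names and the locking reduced to comments:

    #include <stddef.h>
    #include <stdio.h>

    struct meta_info { long pages; };

    struct device_state {
            int initialized;
            struct meta_info *meta;     /* may still be NULL */
    };

    static long read_used_pages(struct device_state *d)
    {
            long val = 0;

            /* down_read(&init_lock) would bracket this in the driver */
            if (d->initialized)
                    val = d->meta->pages;
            /* up_read(&init_lock) */

            return val;
    }

    int main(void)
    {
            struct device_state d = { 0, NULL };

            printf("%ld\n", read_used_pages(&d));   /* 0, no NULL dereference */
            return 0;
    }
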
index 2133f9d59d06323bbf1f69ca80c21ddee66904f6..43005d4d334841866d11e0d503d5e68e416062ce 100644 (file)
@@ -660,11 +660,11 @@ static bool __init
 arch_timer_probed(int type, const struct of_device_id *matches)
 {
        struct device_node *dn;
-       bool probed = false;
+       bool probed = true;
 
        dn = of_find_matching_node(NULL, matches);
-       if (dn && of_device_is_available(dn) && (arch_timers_present & type))
-               probed = true;
+       if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
+               probed = false;
        of_node_put(dn);
 
        return probed;
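
The inverted test above fixes arch_timer_probed(): a timer type now counts as probed unless a matching device-tree node exists, is available, and that timer type has not been registered yet. Restated as a small standalone predicate, with booleans standing in for the node lookup and the arch_timers_present bits:

    #include <stdbool.h>
    #include <stdio.h>

    static bool timer_probed(bool node_present, bool node_available,
                             bool type_registered)
    {
            bool probed = true;

            /* Only an available node whose timer type is still missing
             * means probing is outstanding. */
            if (node_present && node_available && !type_registered)
                    probed = false;

            return probed;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   timer_probed(true, true, false),    /* 0: still pending */
                   timer_probed(true, true, true),     /* 1: already registered */
                   timer_probed(false, false, false)); /* 1: nothing to wait for */
            return 0;
    }
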
index df6575f1430d948e4830c764be858d6138aa5db0..682288ced4acb23337899f97cbc8b3542556eb65 100644 (file)
@@ -562,7 +562,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
 
        if (apiexcp & UECC_EXCP_DETECTED) {
                cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
-               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
                                     pfn, offset, 0,
                                     csrow, -1, -1,
                                     mci->ctl_name, "");
index 3cda79bc8b0034d9917f6cafe457bfb67ba3815b..ece3aef16bb18e135aaa533fd32a298b8b1aa0a3 100644 (file)
@@ -226,7 +226,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
 static void process_ce_no_info(struct mem_ctl_info *mci)
 {
        edac_dbg(3, "\n");
-       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
+       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
                             "e7xxx CE log register overflow", "");
 }
 
index 022a70273ada730a1c6afe6355387041dda36f2b..aa98b136f5d0426eb1626901ce3dedd2ff1aa137 100644 (file)
@@ -242,11 +242,11 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
                                             -1, -1,
                                             "i3000 UE", "");
                } else if (log & I3200_ECCERRLOG_CE) {
-                       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+                       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                                             0, 0, eccerrlog_syndrome(log),
                                             eccerrlog_row(channel, log),
                                             -1, -1,
-                                            "i3000 UE", "");
+                                            "i3000 CE", "");
                }
        }
 }
index 3382f6344e428b31a4659660140a7ea90e5cab28..4382343a7c60ed3cc4d5d1ea4f7661c62b1fe5bc 100644 (file)
@@ -124,7 +124,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
                                     dimm->location[0], dimm->location[1], -1,
                                     "i82860 UE", "");
        else
-               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                                     info->eap, 0, info->derrsyn,
                                     dimm->location[0], dimm->location[1], -1,
                                     "i82860 CE", "");
index 84c3cb15ccdd45fbc3f15e3803c70201d163ff09..8bf61d295ffd7efd638e1f14570508e9c8498f50 100644 (file)
@@ -946,6 +946,12 @@ static const char *keys[KEY_MAX + 1] = {
        [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
        [KEY_BRIGHTNESS_MAX] = "BrightnessMax",
        [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
+       [KEY_KBDINPUTASSIST_PREV] = "KbdInputAssistPrev",
+       [KEY_KBDINPUTASSIST_NEXT] = "KbdInputAssistNext",
+       [KEY_KBDINPUTASSIST_PREVGROUP] = "KbdInputAssistPrevGroup",
+       [KEY_KBDINPUTASSIST_NEXTGROUP] = "KbdInputAssistNextGroup",
+       [KEY_KBDINPUTASSIST_ACCEPT] = "KbdInputAssistAccept",
+       [KEY_KBDINPUTASSIST_CANCEL] = "KbdInputAssistCancel",
 };
 
 static const char *relatives[REL_MAX + 1] = {
index cd9c9e96cf0ef838618a95438afa0794f82b1eb5..e23ab8b30626dae386ebc84e507c55a8e5dcea6c 100644 (file)
 
 #define USB_VENDOR_ID_ELAN             0x04f3
 #define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
+#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B    0x009b
+#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F    0x016f
 
 #define USB_VENDOR_ID_ELECOM           0x056e
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
index 2df7fddbd119bc0cccb5f9cc2a6c4a01bbc9f7e8..725f22ca47fcb808401dc08120ad0366bb9d9d69 100644 (file)
@@ -695,7 +695,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                        break;
 
                case 0x5b: /* TransducerSerialNumber */
-                       set_bit(MSC_SERIAL, input->mscbit);
+                       usage->type = EV_MSC;
+                       usage->code = MSC_SERIAL;
+                       bit = input->mscbit;
+                       max = MSC_MAX;
                        break;
 
                default:  goto unknown;
@@ -862,6 +865,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x28b: map_key_clear(KEY_FORWARDMAIL);     break;
                case 0x28c: map_key_clear(KEY_SEND);            break;
 
+               case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV);             break;
+               case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT);             break;
+               case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP);                break;
+               case 0x2ca: map_key_clear(KEY_KBDINPUTASSIST_NEXTGROUP);                break;
+               case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT);   break;
+               case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL);   break;
+
                default:    goto ignore;
                }
                break;
index f3cb5b0a43454e7b27b2d8c0e3ddf817aa539e6f..5014bb567b29cd8f2799873ffb869ac55a28c6ae 100644 (file)
@@ -71,6 +71,8 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
index bda5994ceb68c910097c712f5d44306840eb8727..8b72cf392b34454abbeca0c5a1c8e17aca39ba13 100644 (file)
@@ -1173,18 +1173,24 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
                                            &mflow->reg_id[i]);
                if (err)
-                       goto err_free;
+                       goto err_create_flow;
                i++;
        }
 
        if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
                err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
                if (err)
-                       goto err_free;
+                       goto err_create_flow;
+               i++;
        }
 
        return &mflow->ibflow;
 
+err_create_flow:
+       while (i) {
+               (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
+               i--;
+       }
 err_free:
        kfree(mflow);
        return ERR_PTR(err);
index 97afee672d07e228a350d63c88b148234e5032cb..4418119cf70788e174921c048d6d9cc7feafdc49 100644 (file)
@@ -364,6 +364,9 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
                goto out;
        }
 
+       /* create a nice device name */
+       sprintf(dev->name, "saa7146 (%d)", saa7146_num);
+
        DEB_EE("pci:%p\n", pci);
 
        err = pci_enable_device(pci);
@@ -438,9 +441,6 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
 
        /* the rest + print status message */
 
-       /* create a nice device name */
-       sprintf(dev->name, "saa7146 (%d)", saa7146_num);
-
        pr_info("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x)\n",
                dev->mem, dev->revision, pci->irq,
                pci->subsystem_vendor, pci->subsystem_device);
index 13734b8c791724e8a84dc27cd5949aeaf6a3b1ca..4cb90317ff45c2bd1b5b57f5b6e0024c3b8bc29b 100644 (file)
@@ -1600,6 +1600,7 @@ static int dvb_register(struct cx23885_tsport *port)
                                break;
 
                        /* attach tuner */
+                       memset(&m88ts2022_config, 0, sizeof(m88ts2022_config));
                        m88ts2022_config.fe = fe0->dvb.frontend;
                        m88ts2022_config.clock = 27000000;
                        memset(&info, 0, sizeof(struct i2c_board_info));
@@ -1635,6 +1636,7 @@ static int dvb_register(struct cx23885_tsport *port)
                /* port c - terrestrial/cable */
                case 2:
                        /* attach frontend */
+                       memset(&si2168_config, 0, sizeof(si2168_config));
                        si2168_config.i2c_adapter = &adapter;
                        si2168_config.fe = &fe0->dvb.frontend;
                        si2168_config.ts_mode = SI2168_TS_SERIAL;
@@ -1654,6 +1656,7 @@ static int dvb_register(struct cx23885_tsport *port)
                        port->i2c_client_demod = client_demod;
 
                        /* attach tuner */
+                       memset(&si2157_config, 0, sizeof(si2157_config));
                        si2157_config.fe = fe0->dvb.frontend;
                        memset(&info, 0, sizeof(struct i2c_board_info));
                        strlcpy(info.type, "si2157", I2C_NAME_SIZE);
index 5425ba1e320d9a2c6bbaa5dd8686ee31372b63c7..95d5d520204884aba7ea0d874376e9136a9f0759 100644 (file)
@@ -1,7 +1,6 @@
 config VIDEO_TW68
        tristate "Techwell tw68x Video For Linux"
        depends on VIDEO_DEV && PCI && VIDEO_V4L2
-       select I2C_ALGOBIT
        select VIDEOBUF2_DMA_SG
        ---help---
          Support for Techwell tw68xx based frame grabber boards.
index a6fb48cf7aaed62fb7b845395d7349bb46916c34..63f0b64057cbf88146a83091bb8b70ccc1857b72 100644 (file)
@@ -306,7 +306,7 @@ static int tw68_initdev(struct pci_dev *pci_dev,
 
        /* get irq */
        err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw68_irq,
-                         IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+                         IRQF_SHARED, dev->name, dev);
        if (err < 0) {
                pr_err("%s: can't get IRQ %d\n",
                       dev->name, pci_dev->irq);
index bee9074ebc138b28a0b12bc38c398d720eadb463..3aac88f1d54ae518a37998c1eb92dfb32020c158 100644 (file)
@@ -166,7 +166,7 @@ config VIDEO_MEM2MEM_DEINTERLACE
 config VIDEO_SAMSUNG_S5P_G2D
        tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver"
        depends on VIDEO_DEV && VIDEO_V4L2
-       depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
+       depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
        depends on HAS_DMA
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
@@ -178,7 +178,7 @@ config VIDEO_SAMSUNG_S5P_G2D
 config VIDEO_SAMSUNG_S5P_JPEG
        tristate "Samsung S5P/Exynos3250/Exynos4 JPEG codec driver"
        depends on VIDEO_DEV && VIDEO_V4L2
-       depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
+       depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
        depends on HAS_DMA
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
@@ -189,7 +189,7 @@ config VIDEO_SAMSUNG_S5P_JPEG
 config VIDEO_SAMSUNG_S5P_MFC
        tristate "Samsung S5P MFC Video Codec"
        depends on VIDEO_DEV && VIDEO_V4L2
-       depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
+       depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
        depends on HAS_DMA
        select VIDEOBUF2_DMA_CONTIG
        default n
index 77c95123774409ac491d45d8db465fdb9c72360b..b7b2e472240ad504c30018487bc7712691f3ef17 100644 (file)
@@ -2,7 +2,7 @@
 config VIDEO_SAMSUNG_EXYNOS4_IS
        bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
        depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
-       depends on (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
+       depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
        depends on OF && COMMON_CLK
        help
          Say Y here to enable camera host interface devices for
index b70fd996d7946e5a1761fc684362972b4f5899c6..aee92d908e49e457ee7baffe41634155ab069da5 100644 (file)
@@ -832,6 +832,7 @@ err:
        return -ENXIO;
 }
 
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 static int fimc_m2m_suspend(struct fimc_dev *fimc)
 {
        unsigned long flags;
@@ -870,6 +871,7 @@ static int fimc_m2m_resume(struct fimc_dev *fimc)
 
        return 0;
 }
+#endif /* CONFIG_PM_RUNTIME || CONFIG_PM_SLEEP */
 
 static const struct of_device_id fimc_of_match[];
 
index e525a7c8d885770c7f0e85e3219a387f432a13c2..6fcc7f072acea885816d30c95068c77942b17fde 100644 (file)
@@ -893,7 +893,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
                               unsigned long buffer, unsigned long size,
                               struct s5p_jpeg_ctx *ctx)
 {
-       int c, components, notfound;
+       int c, components = 0, notfound;
        unsigned int height, width, word, subsampling = 0;
        long length;
        struct s5p_jpeg_buffer jpeg_buffer;
@@ -2632,6 +2632,7 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
        return 0;
 }
 
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 static int s5p_jpeg_runtime_suspend(struct device *dev)
 {
        struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
@@ -2681,7 +2682,9 @@ static int s5p_jpeg_runtime_resume(struct device *dev)
 
        return 0;
 }
+#endif /* CONFIG_PM_RUNTIME || CONFIG_PM_SLEEP */
 
+#ifdef CONFIG_PM_SLEEP
 static int s5p_jpeg_suspend(struct device *dev)
 {
        if (pm_runtime_suspended(dev))
@@ -2697,6 +2700,7 @@ static int s5p_jpeg_resume(struct device *dev)
 
        return s5p_jpeg_runtime_resume(dev);
 }
+#endif
 
 static const struct dev_pm_ops s5p_jpeg_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(s5p_jpeg_suspend, s5p_jpeg_resume)
index a9d56f8936b4e576d2e4552b1d86323d15801da5..beb180e71ba0e1022611dc3bdb1eacc9e14478f4 100644 (file)
@@ -9,7 +9,7 @@
 config VIDEO_SAMSUNG_S5P_TV
        bool "Samsung TV driver for S5P platform"
        depends on PM_RUNTIME
-       depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
+       depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
        default n
        ---help---
          Say Y here to enable selecting the TV output devices for
index d71139a2ae0049526aa810512553d56942025307..c3090932f06d56cde74f3f2ed409325b3aff1d36 100644 (file)
@@ -1,8 +1,11 @@
 config VIDEO_VIVID
        tristate "Virtual Video Test Driver"
-       depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64
+       depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64 && FB
        select FONT_SUPPORT
        select FONT_8x16
+       select FB_CFB_FILLRECT
+       select FB_CFB_COPYAREA
+       select FB_CFB_IMAGEBLIT
        select VIDEOBUF2_VMALLOC
        default n
        ---help---
index 0c6fa53fa64614f34090e076717c43e094150230..cbcd6250e7b2afc3feec4456337700e3ae11b300 100644 (file)
@@ -136,7 +136,7 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
                tpg->black_line[plane] = vzalloc(max_w * pixelsz);
                if (!tpg->black_line[plane])
                        return -ENOMEM;
-               tpg->random_line[plane] = vzalloc(max_w * pixelsz);
+               tpg->random_line[plane] = vzalloc(max_w * 2 * pixelsz);
                if (!tpg->random_line[plane])
                        return -ENOMEM;
        }
index 6f28f6e02ea57e947a2a8805ccccee4cfaa30cc2..704397f3c106b6419ed7a3c88b8dcb2f7ae3a902 100644 (file)
@@ -1256,7 +1256,7 @@ static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
                fmerr("Unable to read firmware(%s) content\n", fw_name);
                return ret;
        }
-       fmdbg("Firmware(%s) length : %d bytes\n", fw_name, fw_entry->size);
+       fmdbg("Firmware(%s) length : %zu bytes\n", fw_name, fw_entry->size);
 
        fw_data = (void *)fw_entry->data;
        fw_len = fw_entry->size;
index e44c8aba6074c0667181d9e7a7daab2f7db4d1bc..803a0e63d47eef2ee40e1fb1ce7fbec21cd7d9ab 100644 (file)
@@ -1333,9 +1333,9 @@ static int xc5000_release(struct dvb_frontend *fe)
 
        if (priv) {
                cancel_delayed_work(&priv->timer_sleep);
-               hybrid_tuner_release_state(priv);
                if (priv->firmware)
                        release_firmware(priv->firmware);
+               hybrid_tuner_release_state(priv);
        }
 
        mutex_unlock(&xc5000_list_mutex);
index 00758c83eec733475be5596818a5347868cde089..1896ab218b117beaaa229848b0b9c72f32da5e0b 100644 (file)
@@ -193,8 +193,8 @@ static int af9035_wr_reg_mask(struct dvb_usb_device *d, u32 reg, u8 val,
        return af9035_wr_regs(d, reg, &val, 1);
 }
 
-static int af9035_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr,
-               void *platform_data, struct i2c_adapter *adapter)
+static int af9035_add_i2c_dev(struct dvb_usb_device *d, const char *type,
+               u8 addr, void *platform_data, struct i2c_adapter *adapter)
 {
        int ret, num;
        struct state *state = d_to_priv(d);
@@ -221,7 +221,7 @@ static int af9035_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr,
                goto err;
        }
 
-       request_module(board_info.type);
+       request_module("%s", board_info.type);
 
        /* register I2C device */
        client = i2c_new_device(adapter, &board_info);
index d3c5f230e97a7272f69e7cbba367c3a18c9cbe8b..ae917c042a52e7737a74e536328906014dbe34aa 100644 (file)
@@ -630,8 +630,8 @@ error:
        return ret;
 }
 
-static int anysee_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr,
-               void *platform_data)
+static int anysee_add_i2c_dev(struct dvb_usb_device *d, const char *type,
+               u8 addr, void *platform_data)
 {
        int ret, num;
        struct anysee_state *state = d_to_priv(d);
@@ -659,7 +659,7 @@ static int anysee_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr,
                goto err;
        }
 
-       request_module(board_info.type);
+       request_module("%s", board_info.type);
 
        /* register I2C device */
        client = i2c_new_device(adapter, &board_info);
index b5e52fe7957aff0d047abb1f89caab275fcb1afd..901cf2b952d786a80bfed6a60d2b28cd49328ce9 100644 (file)
@@ -504,7 +504,7 @@ EXPORT_SYMBOL_GPL(em28xx_audio_analog_set);
 int em28xx_audio_setup(struct em28xx *dev)
 {
        int vid1, vid2, feat, cfg;
-       u32 vid;
+       u32 vid = 0;
        u8 i2s_samplerates;
 
        if (dev->chip_id == CHIP_ID_EM2870 ||
index 581f6dad4ca9deef0fa5a0820372238b8a4e3bbd..23f8f6afa2e061a783ee9fa5c0d244c9c6018dcf 100644 (file)
@@ -712,8 +712,10 @@ static int em28xx_ir_init(struct em28xx *dev)
        em28xx_info("Registering input extension\n");
 
        ir = kzalloc(sizeof(*ir), GFP_KERNEL);
+       if (!ir)
+               return -ENOMEM;
        rc = rc_allocate_device();
-       if (!ir || !rc)
+       if (!rc)
                goto error;
 
        /* record handles to ourself */
index 328b5ba47a0a20b1f93ade77a74c0c9fb4e06cfe..fd1fa412e09462362d8b4520ee64c6bb1d2979e5 100644 (file)
@@ -932,7 +932,7 @@ static int hackrf_set_bandwidth(struct hackrf_dev *dev)
        dev->bandwidth->val = bandwidth;
        dev->bandwidth->cur.val = bandwidth;
 
-       dev_dbg(dev->dev, "bandwidth selected=%d\n", bandwidth_lut[i].freq);
+       dev_dbg(dev->dev, "bandwidth selected=%d\n", bandwidth);
 
        u16tmp = 0;
        u16tmp |= ((bandwidth >> 0) & 0xff) << 0;
index 68bc9615660efaf584bc1361b54706a5ef2b4df1..9bfa041e33161be93333ad0133acc1adbb957b4e 100644 (file)
@@ -446,6 +446,7 @@ static int usbvision_v4l2_close(struct file *file)
        if (usbvision->remove_pending) {
                printk(KERN_INFO "%s: Final disconnect\n", __func__);
                usbvision_release(usbvision);
+               return 0;
        }
        mutex_unlock(&usbvision->v4l2_lock);
 
@@ -1221,6 +1222,7 @@ static int usbvision_radio_close(struct file *file)
        if (usbvision->remove_pending) {
                printk(KERN_INFO "%s: Final disconnect\n", __func__);
                usbvision_release(usbvision);
+               return err_code;
        }
 
        mutex_unlock(&usbvision->v4l2_lock);
index 60a8e2c3631e0b155b241e624092ccef271ae030..378ae02e593b93a19bb9010a23f62175029bbefd 100644 (file)
@@ -318,7 +318,6 @@ static int uvc_v4l2_set_format(struct uvc_streaming *stream,
        stream->ctrl = probe;
        stream->cur_format = format;
        stream->cur_frame = frame;
-       stream->frame_size = fmt->fmt.pix.sizeimage;
 
 done:
        mutex_unlock(&stream->mutex);
index 9ace520bb079792b840d8aff38b68945fef0eeb0..df81b9c4faf12ae0e00f6267d380d76608b4c8ba 100644 (file)
@@ -1143,7 +1143,7 @@ static int uvc_video_encode_data(struct uvc_streaming *stream,
 static void uvc_video_validate_buffer(const struct uvc_streaming *stream,
                                      struct uvc_buffer *buf)
 {
-       if (stream->frame_size != buf->bytesused &&
+       if (stream->ctrl.dwMaxVideoFrameSize != buf->bytesused &&
            !(stream->cur_format->flags & UVC_FMT_FLAG_COMPRESSED))
                buf->error = 1;
 }
index 6f676c29ec09fd997b8833d78df1b8b72fdf0d91..864ada74036033fea9c2956ac4980b646163bbc0 100644 (file)
@@ -457,7 +457,6 @@ struct uvc_streaming {
        struct uvc_format *def_format;
        struct uvc_format *cur_format;
        struct uvc_frame *cur_frame;
-       size_t frame_size;
 
        /* Protect access to ctrl, cur_format, cur_frame and hardware video
         * probe control.
index bf80f0f7dfb853ab367887fb76cfc1d0be6c907e..e02353e340dd78d07d0750b05af0533ffad35dae 100644 (file)
@@ -305,6 +305,15 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
        /* Try to remap memory */
        size = vma->vm_end - vma->vm_start;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+       /* the "vm_pgoff" is just used in v4l2 to find the
+        * corresponding buffer data structure which is allocated
+        * earlier and it does not mean the offset from the physical
+        * buffer start address as usual. So set it to 0 to pass
+        * the sanity check in vm_iomap_memory().
+        */
+       vma->vm_pgoff = 0;
+
        retval = vm_iomap_memory(vma, mem->dma_handle, size);
        if (retval) {
                dev_err(q->dev, "mmap: remap failed with error %d. ",
index 69506ebd4d0765283d2a108cb8912e0a246323fc..c99e896604ee35909e0a2d55bbd95fd31caddbea 100644 (file)
 
 #include "cxl.h"
 
-static struct cxl_sste* find_free_sste(struct cxl_sste *primary_group,
-                                      bool sec_hash,
-                                      struct cxl_sste *secondary_group,
-                                      unsigned int *lru)
+static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
 {
-       unsigned int i, entry;
-       struct cxl_sste *sste, *group = primary_group;
-
-       for (i = 0; i < 2; i++) {
-               for (entry = 0; entry < 8; entry++) {
-                       sste = group + entry;
-                       if (!(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
-                               return sste;
-               }
-               if (!sec_hash)
-                       break;
-               group = secondary_group;
+       return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
+               (sste->esid_data == cpu_to_be64(slb->esid)));
+}
+
+/*
+ * This finds a free SSTE for the given SLB, or returns NULL if it's already in
+ * the segment table.
+ */
+static struct cxl_sste* find_free_sste(struct cxl_context *ctx,
+                                      struct copro_slb *slb)
+{
+       struct cxl_sste *primary, *sste, *ret = NULL;
+       unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
+       unsigned int entry;
+       unsigned int hash;
+
+       if (slb->vsid & SLB_VSID_B_1T)
+               hash = (slb->esid >> SID_SHIFT_1T) & mask;
+       else /* 256M */
+               hash = (slb->esid >> SID_SHIFT) & mask;
+
+       primary = ctx->sstp + (hash << 3);
+
+       for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
+               if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
+                       ret = sste;
+               if (sste_matches(sste, slb))
+                       return NULL;
        }
+       if (ret)
+               return ret;
+
        /* Nothing free, select an entry to cast out */
-       if (sec_hash && (*lru & 0x8))
-               sste = secondary_group + (*lru & 0x7);
-       else
-               sste = primary_group + (*lru & 0x7);
-       *lru = (*lru + 1) & 0xf;
+       ret = primary + ctx->sst_lru;
+       ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;
 
-       return sste;
+       return ret;
 }
 
 static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
 {
        /* mask is the group index, we search primary and secondary here. */
-       unsigned int mask = (ctx->sst_size >> 7)-1; /* SSTP0[SegTableSize] */
-       bool sec_hash = 1;
        struct cxl_sste *sste;
-       unsigned int hash;
        unsigned long flags;
 
-
-       sec_hash = !!(cxl_p1n_read(ctx->afu, CXL_PSL_SR_An) & CXL_PSL_SR_An_SC);
-
-       if (slb->vsid & SLB_VSID_B_1T)
-               hash = (slb->esid >> SID_SHIFT_1T) & mask;
-       else /* 256M */
-               hash = (slb->esid >> SID_SHIFT) & mask;
-
        spin_lock_irqsave(&ctx->sste_lock, flags);
-       sste = find_free_sste(ctx->sstp + (hash << 3), sec_hash,
-                             ctx->sstp + ((~hash & mask) << 3), &ctx->sst_lru);
+       sste = find_free_sste(ctx, slb);
+       if (!sste)
+               goto out_unlock;
 
        pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
                        sste - ctx->sstp, slb->vsid, slb->esid);
 
        sste->vsid_data = cpu_to_be64(slb->vsid);
        sste->esid_data = cpu_to_be64(slb->esid);
+out_unlock:
        spin_unlock_irqrestore(&ctx->sste_lock, flags);
 }
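
The rewritten lookup above hashes the SLB ESID to one eight-entry group of the segment table, remembers the first invalid slot, refuses to insert a duplicate, and otherwise casts out an entry chosen by a small round-robin counter. A simplified standalone sketch of that flow, with made-up types, a single group, and no hashing or locking:

    #include <stdio.h>

    #define GROUP_SIZE 8

    struct slot { unsigned long long esid; int valid; };

    /* Return the index to fill, or -1 if the ESID is already present. */
    static int find_slot(struct slot *grp, unsigned long long esid,
                         unsigned int *lru)
    {
            int free = -1;

            for (int i = 0; i < GROUP_SIZE; i++) {
                    if (free < 0 && !grp[i].valid)
                            free = i;
                    if (grp[i].valid && grp[i].esid == esid)
                            return -1;              /* duplicate */
            }
            if (free >= 0)
                    return free;

            free = *lru;                            /* cast out */
            *lru = (*lru + 1) & (GROUP_SIZE - 1);
            return free;
    }

    int main(void)
    {
            struct slot grp[GROUP_SIZE] = { { 0x100, 1 } };
            unsigned int lru = 0;

            printf("%d %d\n",
                   find_slot(grp, 0x200, &lru),     /* 1: first free slot */
                   find_slot(grp, 0x100, &lru));    /* -1: already mapped */
            return 0;
    }
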
 
index 623286a77114946aa60ae05c87bf21b781fd24b6..d47532e8f4f16ca4ebd64d3a1bd33fe106a5c5c8 100644 (file)
@@ -417,7 +417,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
        ctx->elem->haurp = 0; /* disable */
        ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));
 
-       sr = CXL_PSL_SR_An_SC;
+       sr = 0;
        if (ctx->master)
                sr |= CXL_PSL_SR_An_MP;
        if (mfspr(SPRN_LPCR) & LPCR_TC)
@@ -508,7 +508,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
        u64 sr;
        int rc;
 
-       sr = CXL_PSL_SR_An_SC;
+       sr = 0;
        set_endian(sr);
        if (ctx->master)
                sr |= CXL_PSL_SR_An_MP;
index 4706386b7d34c2d719e30eb602defd49ee92a72d..f9009be3f3077057222984bbf6d03fdac8fcbe7c 100644 (file)
@@ -135,6 +135,7 @@ config MACVLAN
 config MACVTAP
        tristate "MAC-VLAN based tap driver"
        depends on MACVLAN
+       depends on INET
        help
          This adds a specialized tap character device driver that is based
          on the MAC-VLAN network interface, called macvtap. A macvtap device
@@ -200,6 +201,7 @@ config RIONET_RX_SIZE
 
 config TUN
        tristate "Universal TUN/TAP device driver support"
+       depends on INET
        select CRC32
        ---help---
          TUN/TAP provides packet reception and transmission for user space
index 1020a7af67cf6d8bbaf5016d2367454c63b393da..78d8e876f3aaada8ae88c429d007d4c8e3361633 100644 (file)
@@ -395,7 +395,7 @@ static int mv88e6171_get_sset_count(struct dsa_switch *ds)
 }
 
 struct dsa_switch_driver mv88e6171_switch_driver = {
-       .tag_protocol           = DSA_TAG_PROTO_DSA,
+       .tag_protocol           = DSA_TAG_PROTO_EDSA,
        .priv_size              = sizeof(struct mv88e6xxx_priv_state),
        .probe                  = mv88e6171_probe,
        .setup                  = mv88e6171_setup,
index 29554992215aa18301f26b7e431b1fc19dfff9cb..2349ea9702557b95de0860eb5921589104cd5c9d 100644 (file)
@@ -1465,7 +1465,7 @@ static int xgbe_set_features(struct net_device *netdev,
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       unsigned int rxcsum, rxvlan, rxvlan_filter;
+       netdev_features_t rxcsum, rxvlan, rxvlan_filter;
 
        rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
        rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
@@ -1598,7 +1598,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
        struct skb_shared_hwtstamps *hwtstamps;
        unsigned int incomplete, error, context_next, context;
        unsigned int len, put_len, max_len;
-       int received = 0;
+       unsigned int received = 0;
+       int packet_count = 0;
 
        DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
 
@@ -1608,7 +1609,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 
        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        packet = &ring->packet_data;
-       while (received < budget) {
+       while (packet_count < budget) {
                DBGPR("  cur = %d\n", ring->cur);
 
                /* First time in loop see if we need to restore state */
@@ -1662,7 +1663,7 @@ read_again:
                        if (packet->errors)
                                DBGPR("Error in received packet\n");
                        dev_kfree_skb(skb);
-                       continue;
+                       goto next_packet;
                }
 
                if (!context) {
@@ -1677,7 +1678,7 @@ read_again:
                                        }
 
                                        dev_kfree_skb(skb);
-                                       continue;
+                                       goto next_packet;
                                }
                                memcpy(skb_tail_pointer(skb), rdata->skb->data,
                                       put_len);
@@ -1694,7 +1695,7 @@ read_again:
 
                /* Stray Context Descriptor? */
                if (!skb)
-                       continue;
+                       goto next_packet;
 
                /* Be sure we don't exceed the configured MTU */
                max_len = netdev->mtu + ETH_HLEN;
@@ -1705,7 +1706,7 @@ read_again:
                if (skb->len > max_len) {
                        DBGPR("packet length exceeds configured MTU\n");
                        dev_kfree_skb(skb);
-                       continue;
+                       goto next_packet;
                }
 
 #ifdef XGMAC_ENABLE_RX_PKT_DUMP
@@ -1739,6 +1740,9 @@ read_again:
 
                netdev->last_rx = jiffies;
                napi_gro_receive(&pdata->napi, skb);
+
+next_packet:
+               packet_count++;
        }
 
        /* Check if we need to save state before leaving */
@@ -1752,9 +1756,9 @@ read_again:
                rdata->state.error = error;
        }
 
-       DBGPR("<--xgbe_rx_poll: received = %d\n", received);
+       DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
 
-       return received;
+       return packet_count;
 }
 
 static int xgbe_poll(struct napi_struct *napi, int budget)
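
The xgbe_rx_poll() changes above separate descriptor bookkeeping from the NAPI budget: every packet taken off the ring is counted via the new next_packet label, even when it is dropped for errors or MTU, and that count is what gets compared against and returned as the budget consumed. A rough standalone sketch of that accounting, with the ring reduced to an array of drop flags:

    #include <stdio.h>

    /* Count every packet against the budget, delivered or dropped. */
    static int rx_poll(int budget, const int drop[], int ndesc)
    {
            int packet_count = 0;
            int delivered = 0;

            for (int i = 0; i < ndesc && packet_count < budget; i++) {
                    if (!drop[i])
                            delivered++;            /* napi_gro_receive() here */
                    packet_count++;                 /* counted either way */
            }

            printf("delivered=%d counted=%d\n", delivered, packet_count);
            return packet_count;                    /* reported back to NAPI */
    }

    int main(void)
    {
            int drop[] = { 0, 1, 0, 1, 0, 0 };

            rx_poll(4, drop, 6);                    /* delivered=2 counted=4 */
            return 0;
    }
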
index e6d24c2101982444bde4d500d7559633e8685a2e..c22f32622fa9a50ad3789d8000991ede58502ca0 100644 (file)
@@ -124,20 +124,18 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
 {
        struct net_device *ndev = p->ndev;
        u32 data;
-       int i;
+       int i = 0;
 
        xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
-       for (i = 0; i < 10 && data != ~0U ; i++) {
+       do {
                usleep_range(100, 110);
                data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
-       }
+               if (data == ~0U)
+                       return 0;
+       } while (++i < 10);
 
-       if (data != ~0U) {
-               netdev_err(ndev, "Failed to release memory from shutdown\n");
-               return -ENODEV;
-       }
-
-       return 0;
+       netdev_err(ndev, "Failed to release memory from shutdown\n");
+       return -ENODEV;
 }
 
 static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
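
The xgene_enet_ecc_init() rewrite above turns the memory-ready wait into a bounded poll that returns success as soon as the all-ones ready value is read, and only reports -ENODEV after ten attempts. A small standalone sketch of that loop shape, feeding the register reads from an array:

    #include <stdio.h>

    /* Poll up to 10 times; succeed as soon as the ready pattern appears. */
    static int wait_ready(const unsigned int *samples, int nsamples)
    {
            int i = 0;

            do {
                    unsigned int data = (i < nsamples) ? samples[i] : 0;

                    if (data == ~0U)
                            return 0;               /* ready */
            } while (++i < 10);

            return -1;                              /* timed out */
    }

    int main(void)
    {
            unsigned int ok[]  = { 0, 0, ~0U };
            unsigned int bad[] = { 0 };

            printf("%d %d\n", wait_ready(ok, 3), wait_ready(bad, 1));  /* 0 -1 */
            return 0;
    }
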
index 9ae36979bdee010aa2e8a276dc17611593e3383c..3a6778a667f4558f52f1327f413e088882490f7d 100644 (file)
@@ -1397,6 +1397,9 @@ static void bcm_sysport_netif_start(struct net_device *dev)
        /* Enable NAPI */
        napi_enable(&priv->napi);
 
+       /* Enable RX interrupt and TX ring full interrupt */
+       intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
        phy_start(priv->phydev);
 
        /* Enable TX interrupts for the 32 TXQs */
@@ -1499,9 +1502,6 @@ static int bcm_sysport_open(struct net_device *dev)
        if (ret)
                goto out_free_rx_ring;
 
-       /* Enable RX interrupt and TX ring full interrupt */
-       intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
-
        /* Turn on TDMA */
        ret = tdma_enable_set(priv, 1);
        if (ret)
@@ -1858,6 +1858,8 @@ static int bcm_sysport_resume(struct device *d)
        if (!netif_running(dev))
                return 0;
 
+       umac_reset(priv);
+
        /* We may have been suspended and never received a WOL event that
         * would turn off MPD detection, take care of that now
         */
@@ -1885,9 +1887,6 @@ static int bcm_sysport_resume(struct device *d)
 
        netif_device_attach(dev);
 
-       /* Enable RX interrupt and TX ring full interrupt */
-       intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
-
        /* RX pipe enable */
        topctrl_writel(priv, 0, RX_FLUSH_CNTL);
 
index 23f23c97c2ad7b9fa52b8d589b1627a560635f92..f05fab65d78ac62b3b7905102d39893087b1450b 100644 (file)
@@ -382,10 +382,8 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
                if (l5_cid >= MAX_CM_SK_TBL_SZ)
                        break;
 
-               rcu_read_lock();
                if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
                        rc = -ENODEV;
-                       rcu_read_unlock();
                        break;
                }
                csk = &cp->csk_tbl[l5_cid];
@@ -414,7 +412,6 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
                        }
                }
                csk_put(csk);
-               rcu_read_unlock();
                rc = 0;
        }
        }
@@ -615,7 +612,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
                cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 
        mutex_lock(&cnic_lock);
-       if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+       if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
                RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
                cnic_put(dev);
        } else {
index 8edf0f5bd679bf24b366887ff973a5d454c3af73..6fe300e316c3c41e398b06f1864cfdc7f1f0ebd6 100644 (file)
@@ -60,6 +60,42 @@ void cxgb4_dcb_version_init(struct net_device *dev)
        dcb->dcb_version = FW_PORT_DCB_VER_AUTO;
 }
 
+static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       struct port_dcb_info *dcb = &pi->dcb;
+       struct dcb_app app;
+       int i, err;
+
+       /* zero priority implies remove */
+       app.priority = 0;
+
+       for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+               /* Check if app list is exhausted */
+               if (!dcb->app_priority[i].protocolid)
+                       break;
+
+               app.protocol = dcb->app_priority[i].protocolid;
+
+               if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+                       app.selector = dcb->app_priority[i].sel_field + 1;
+                       err = dcb_ieee_setapp(dev, &app);
+               } else {
+                       app.selector = !!(dcb->app_priority[i].sel_field);
+                       err = dcb_setapp(dev, &app);
+               }
+
+               if (err) {
+                       dev_err(adap->pdev_dev,
+                               "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n",
+                               dcb_ver_array[dcb->dcb_version], app.selector,
+                               app.protocol, -err);
+                       break;
+               }
+       }
+}
+
 /* Finite State machine for Data Center Bridging.
  */
 void cxgb4_dcb_state_fsm(struct net_device *dev,
@@ -80,7 +116,6 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
                        /* we're going to use Host DCB */
                        dcb->state = CXGB4_DCB_STATE_HOST;
                        dcb->supported = CXGB4_DCBX_HOST_SUPPORT;
-                       dcb->enabled = 1;
                        break;
                }
 
@@ -145,6 +180,7 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
                         * state.  We need to reset back to a ground state
                         * of incomplete.
                         */
+                       cxgb4_dcb_cleanup_apps(dev);
                        cxgb4_dcb_state_init(dev);
                        dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
                        dcb->supported = CXGB4_DCBX_FW_SUPPORT;
@@ -349,6 +385,12 @@ static u8 cxgb4_setstate(struct net_device *dev, u8 enabled)
 {
        struct port_info *pi = netdev2pinfo(dev);
 
+       /* If DCBx is host-managed, dcb is enabled by outside lldp agents */
+       if (pi->dcb.state == CXGB4_DCB_STATE_HOST) {
+               pi->dcb.enabled = enabled;
+               return 0;
+       }
+
        /* Firmware doesn't provide any mechanism to control the DCB state.
         */
        if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED))
@@ -833,11 +875,16 @@ static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
 
 /* Return whether IEEE Data Center Bridging has been negotiated.
  */
-static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev)
+static inline int
+cxgb4_ieee_negotiation_complete(struct net_device *dev,
+                               enum cxgb4_dcb_fw_msgs dcb_subtype)
 {
        struct port_info *pi = netdev2pinfo(dev);
        struct port_dcb_info *dcb = &pi->dcb;
 
+       if (dcb_subtype && !(dcb->msgs & dcb_subtype))
+               return 0;
+
        return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
                (dcb->supported & DCB_CAP_DCBX_VER_IEEE));
 }
@@ -850,7 +897,7 @@ static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app)
 {
        int prio;
 
-       if (!cxgb4_ieee_negotiation_complete(dev))
+       if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
                return -EINVAL;
        if (!(app->selector && app->protocol))
                return -EINVAL;
@@ -872,7 +919,7 @@ static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app)
 {
        int ret;
 
-       if (!cxgb4_ieee_negotiation_complete(dev))
+       if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
                return -EINVAL;
        if (!(app->selector && app->protocol))
                return -EINVAL;
index 3f60070f2519d2f1df97eb2d9dd39bfae0c0ae92..8520d5529df872fad60a377231f5154fcf91a4e3 100644 (file)
@@ -694,7 +694,11 @@ int cxgb4_dcb_enabled(const struct net_device *dev)
 #ifdef CONFIG_CHELSIO_T4_DCB
        struct port_info *pi = netdev_priv(dev);
 
-       return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
+       if (!pi->dcb.enabled)
+               return 0;
+
+       return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
+               (pi->dcb.state == CXGB4_DCB_STATE_HOST));
 #else
        return 0;
 #endif
@@ -6610,6 +6614,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        spin_lock_init(&adapter->stats_lock);
        spin_lock_init(&adapter->tid_release_lock);
+       spin_lock_init(&adapter->win0_lock);
 
        INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
        INIT_WORK(&adapter->db_full_task, process_db_full);
index bfa398d9182681c92203f5a7687010e5fdcad7d8..0b42bddaf28443dbdc9317feee3dc43fcab8ade9 100644 (file)
@@ -2929,14 +2929,14 @@ static const struct pci_device_id cxgb4vf_pci_tbl[] = {
        CH_DEVICE(0x480d),      /* T480-cr */
        CH_DEVICE(0x480e),      /* T440-lp-cr */
        CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
+       CH_DEVICE(0x4881),
+       CH_DEVICE(0x4882),
+       CH_DEVICE(0x4883),
+       CH_DEVICE(0x4884),
+       CH_DEVICE(0x4885),
+       CH_DEVICE(0x4886),
+       CH_DEVICE(0x4887),
+       CH_DEVICE(0x4888),
        CH_DEVICE(0x5801),      /* T520-cr */
        CH_DEVICE(0x5802),      /* T522-cr */
        CH_DEVICE(0x5803),      /* T540-cr */
index 69dfd3c9e5298a8519792cb8fcb77fe436d1847f..0be6850be8a2383e11240093bd421dfbd2595595 100644 (file)
@@ -86,7 +86,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
        int i;
 
        enic_rfs_timer_stop(enic);
-       spin_lock(&enic->rfs_h.lock);
+       spin_lock_bh(&enic->rfs_h.lock);
        enic->rfs_h.free = 0;
        for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
                struct hlist_head *hhead;
@@ -100,7 +100,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
                        kfree(n);
                }
        }
-       spin_unlock(&enic->rfs_h.lock);
+       spin_unlock_bh(&enic->rfs_h.lock);
 }
 
 struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
@@ -128,7 +128,7 @@ void enic_flow_may_expire(unsigned long data)
        bool res;
        int j;
 
-       spin_lock(&enic->rfs_h.lock);
+       spin_lock_bh(&enic->rfs_h.lock);
        for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
                struct hlist_head *hhead;
                struct hlist_node *tmp;
@@ -148,7 +148,7 @@ void enic_flow_may_expire(unsigned long data)
                        }
                }
        }
-       spin_unlock(&enic->rfs_h.lock);
+       spin_unlock_bh(&enic->rfs_h.lock);
        mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
 }
 
@@ -183,7 +183,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                return -EPROTONOSUPPORT;
 
        tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
-       spin_lock(&enic->rfs_h.lock);
+       spin_lock_bh(&enic->rfs_h.lock);
        n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);
 
        if (n) { /* entry already present  */
@@ -277,7 +277,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
        }
 
 ret_unlock:
-       spin_unlock(&enic->rfs_h.lock);
+       spin_unlock_bh(&enic->rfs_h.lock);
        return res;
 }
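
The three enic hunks above switch the flow-table lock to the _bh variants because the same lock is also taken from enic_flow_may_expire(), a timer callback that runs in softirq context; a process-context holder that leaves bottom halves enabled can be interrupted by that softirq on the same CPU and deadlock on the lock. A minimal sketch of the pattern, with illustrative names only:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(tbl_lock);

/* softirq context (e.g. a timer callback): plain spin_lock is sufficient */
static void tbl_expire(unsigned long data)
{
        spin_lock(&tbl_lock);
        /* ... walk and expire table entries ... */
        spin_unlock(&tbl_lock);
}

/* process context: keep the softirq off this CPU while the lock is held */
static void tbl_update(void)
{
        spin_lock_bh(&tbl_lock);
        /* ... add or remove table entries ... */
        spin_unlock_bh(&tbl_lock);
}
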
 
index 929bfe70080ac040bd879ede472d72744aaa300c..180e53fa628face1c64b5306afdf5c95aaca89a6 100644 (file)
@@ -1674,13 +1674,13 @@ static int enic_stop(struct net_device *netdev)
 
        enic_dev_disable(enic);
 
-       local_bh_disable();
        for (i = 0; i < enic->rq_count; i++) {
                napi_disable(&enic->napi[i]);
+               local_bh_disable();
                while (!enic_poll_lock_napi(&enic->rq[i]))
                        mdelay(1);
+               local_bh_enable();
        }
-       local_bh_enable();
 
        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
index 81b96cf875740f6faa418885718dcc78c2113570..50a851db2852e28979a0b78f481347fc9e29778a 100644 (file)
@@ -1581,7 +1581,8 @@ fec_enet_interrupt(int irq, void *dev_id)
                complete(&fep->mdio_done);
        }
 
-       fec_ptp_check_pps_event(fep);
+       if (fep->ptp_clock)
+               fec_ptp_check_pps_event(fep);
 
        return ret;
 }
index 3d4e08be170970b7f981c19a23e65c35f7ae860b..b34214e2df5f6e55bdbb29e6a8016c139e74345c 100644 (file)
@@ -341,6 +341,9 @@ static void restart(struct net_device *dev)
                FC(fecp, x_cntrl, FEC_TCNTRL_FDEN);     /* FD disable */
        }
 
+       /* Restore multicast and promiscuous settings */
+       set_multicast_list(dev);
+
        /*
         * Enable interrupts we wish to service.
         */
index f30411f0701f694175392198941b0d69d3798682..7a184e8816a48f9eb74f4890b60b32345dbcf5f1 100644 (file)
@@ -355,6 +355,9 @@ static void restart(struct net_device *dev)
        if (fep->phydev->duplex)
                S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
 
+       /* Restore multicast and promiscuous settings */
+       set_multicast_list(dev);
+
        S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
 }
 
index 5f6aded512f539f9a12cb692f810fd3d8e560a3f..24f3986cfae2950f582d3a5e4644c4e1b666391e 100644 (file)
@@ -1075,7 +1075,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                                  NETIF_F_HW_CSUM |
                                  NETIF_F_SG);
 
-       netdev->priv_flags |= IFF_UNICAST_FLT;
+       /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
+       if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
+           hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
+               netdev->priv_flags |= IFF_UNICAST_FLT;
 
        adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
 
index ed5f1c15fb0f4cd5fe7ce797920fd3b6cd21f7df..c3a7f4a4b7757a35afa4a23b944a64e0ac4d673b 100644 (file)
@@ -6151,7 +6151,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
                                I40E_GL_MDET_TX_PF_NUM_SHIFT;
                u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
                                I40E_GL_MDET_TX_VF_NUM_SHIFT;
-               u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >>
+               u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
                                I40E_GL_MDET_TX_EVENT_SHIFT;
                u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
                                I40E_GL_MDET_TX_QUEUE_SHIFT;
@@ -6165,7 +6165,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
        if (reg & I40E_GL_MDET_RX_VALID_MASK) {
                u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
                                I40E_GL_MDET_RX_FUNCTION_SHIFT;
-               u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >>
+               u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
                                I40E_GL_MDET_RX_EVENT_SHIFT;
                u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
                                I40E_GL_MDET_RX_QUEUE_SHIFT;
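
Both i40e hunks fix the same slip: the event field was masked with the *_SHIFT constant instead of the *_MASK constant, so the extracted value was wrong whenever the field does not start at bit 0. A standalone illustration of the mask-then-shift idiom (the register layout below is invented for the example):

#include <stdint.h>
#include <stdio.h>

#define EVENT_SHIFT 25
#define EVENT_MASK  (0x1fU << EVENT_SHIFT)      /* 5-bit field in bits 29:25 */

static unsigned int event_from_reg(uint32_t reg)
{
        /* correct: mask the field out first, then shift it down to bit 0 */
        return (reg & EVENT_MASK) >> EVENT_SHIFT;
}

int main(void)
{
        uint32_t reg = 0x0au << EVENT_SHIFT;    /* encode event value 10 */

        printf("fixed = %u\n", event_from_reg(reg));    /* prints 10 */
        /* buggy form masks with the shift count, not the mask */
        printf("buggy = %u\n", (unsigned)((reg & EVENT_SHIFT) >> EVENT_SHIFT)); /* prints 0 */
        return 0;
}
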
index a21b14495ebd0d7b953a1d8e4b2319d23452b95e..a2d72a87cbde40465c16277e32d4a242a2873ee3 100644 (file)
@@ -6537,6 +6537,9 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
        if (unlikely(page_to_nid(page) != numa_node_id()))
                return false;
 
+       if (unlikely(page->pfmemalloc))
+               return false;
+
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
        if (unlikely(page_count(page) != 1))
@@ -6603,7 +6606,8 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
                /* we can reuse buffer as-is, just make sure it is local */
-               if (likely(page_to_nid(page) == numa_node_id()))
+               if (likely((page_to_nid(page) == numa_node_id()) &&
+                          !page->pfmemalloc))
                        return true;
 
                /* this page cannot be reused so discard it */
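
The igb hunks refuse to recycle RX pages that came out of the pfmemalloc emergency reserves, pages handed out only to guarantee forward progress under memory pressure; such pages must go back to the allocator rather than into the RX ring. Condensed, the reuse test from the hunk looks roughly like this on a 3.18-era struct page (later kernels wrap the field access in page_is_pfmemalloc()):

#include <linux/mm.h>
#include <linux/topology.h>

/* a receive page is only worth recycling if it is node-local
 * and was not allocated from the pfmemalloc reserves */
static bool rx_page_is_reusable(struct page *page)
{
        return page_to_nid(page) == numa_node_id() && !page->pfmemalloc;
}
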
index 3ce4a258f94534da25304c9138e0bf28fdd3c9ff..0ae038b9af90cfc7dc15bd01c0b4014e37fa1911 100644 (file)
@@ -342,12 +342,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
                if (old == advertised)
                        return err;
                /* this sets the link speed and restarts auto-neg */
+               while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+                       usleep_range(1000, 2000);
+
                hw->mac.autotry_restart = true;
                err = hw->mac.ops.setup_link(hw, advertised, true);
                if (err) {
                        e_info(probe, "setup link failed with code %d\n", err);
                        hw->mac.ops.setup_link(hw, old, true);
                }
+               clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
        } else {
                /* in this case we currently only support 10Gb/FULL */
                u32 speed = ethtool_cmd_speed(ecmd);
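
The ixgbe_set_settings() hunk serializes the ethtool path against SFP initialization by busy-waiting on an adapter state bit: test_and_set_bit() is atomic, so the caller that flips the bit from 0 to 1 owns the section and everyone else backs off and retries. The same bit-lock idiom in isolation (the state word and bit name are placeholders):

#include <linux/bitops.h>
#include <linux/delay.h>

#define MY_BUSY_BIT     0

static unsigned long my_state;

static void do_serialized_setup(void)
{
        /* acquire: loop until we are the caller that flipped the bit 0 -> 1 */
        while (test_and_set_bit(MY_BUSY_BIT, &my_state))
                usleep_range(1000, 2000);       /* sleeps, so process context only */

        /* ... critical section, may itself sleep ... */

        clear_bit(MY_BUSY_BIT, &my_state);      /* release */
}
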
index fec5212d43374835156049f8b7c5f0bc828dcdde..d2df4e3d1032496dbf294f4d7b0b741ddfaac6d8 100644 (file)
@@ -4321,8 +4321,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
                                IXGBE_CB(skb)->page_released = false;
                        }
                        dev_kfree_skb(skb);
+                       rx_buffer->skb = NULL;
                }
-               rx_buffer->skb = NULL;
                if (rx_buffer->dma)
                        dma_unmap_page(dev, rx_buffer->dma,
                                       ixgbe_rx_pg_size(rx_ring),
index 34c137878545fc672dad1a3d86e11c034c0ac368..454d9fea640ecb0aa849b8d8106695d017ca3309 100644 (file)
@@ -836,8 +836,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         * whether LSO is used */
        tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-               tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
-                                                        MLX4_WQE_CTRL_TCP_UDP_CSUM);
+               if (!skb->encapsulation)
+                       tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+                                                                MLX4_WQE_CTRL_TCP_UDP_CSUM);
+               else
+                       tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
                ring->tx_csum++;
        }
 
index a49c9d11d8a58e5a8bcc5419133a7909204a0a41..49290a4059039a72c86e119c59d2d03ec6c3eb4c 100644 (file)
@@ -1026,6 +1026,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
                                pr_cont("\n");
                }
        }
+       synchronize_irq(eq->irq);
 
        mlx4_mtt_cleanup(dev, &eq->mtt);
        for (i = 0; i < npages; ++i)
index ca0f98c951054945cd29fc3be2284fc7eeb7d729..872843179f44af3c7442614477bd2af131118347 100644 (file)
@@ -955,6 +955,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
                                        cur->ib.dst_gid_msk);
                        break;
 
+               case MLX4_NET_TRANS_RULE_ID_VXLAN:
+                       len += snprintf(buf + len, BUF_SIZE - len,
+                                       "VNID = %d ", be32_to_cpu(cur->vxlan.vni));
+                       break;
                case MLX4_NET_TRANS_RULE_ID_IPV6:
                        break;
 
index ed53291468f3226e644aa039025a962aff0b0fe0..a278238a2db643ba04b924dd9920a284abcd8908 100644 (file)
@@ -420,6 +420,7 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
+       synchronize_irq(table->msix_arr[eq->irqn].vector);
        mlx5_buf_free(dev, &eq->buf);
 
        return err;
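
The mlx4 and mlx5 hunks add the same missing step to EQ teardown: after the device has been told to stop the event queue, synchronize_irq() waits for any interrupt handler instance still running on another CPU to return, so the buffers it may dereference are not freed underneath it. The teardown ordering in miniature (names illustrative):

#include <linux/interrupt.h>
#include <linux/slab.h>

struct my_eq {
        unsigned int irq;       /* Linux IRQ number the EQ handler is attached to */
        void *buf;              /* memory the handler dereferences */
};

static void my_eq_destroy(struct my_eq *eq)
{
        /* 1. tell the hardware/firmware to stop generating events (omitted) */

        /* 2. wait for a handler already executing on another CPU to finish */
        synchronize_irq(eq->irq);

        /* 3. only now is it safe to free what the handler may still touch */
        kfree(eq->buf);
}
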
index ee84a90e371c5dedf2fa5b0b31ba520d8611c85b..aaf2987512b5db7472bdef518c4aa4c510612ef7 100644 (file)
@@ -343,8 +343,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        unsigned short dma_flags;
        int i = 0;
 
-       EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
-
        if (skb_shinfo(skb)->gso_size)
                return efx_enqueue_skb_tso(tx_queue, skb);
 
@@ -1258,8 +1256,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
        /* Find the packet protocol and sanity-check it */
        state.protocol = efx_tso_check_protocol(skb);
 
-       EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
-
        rc = tso_start(&state, efx, skb);
        if (rc)
                goto mem_err;
index 5e94d00b96b301d9dc9cd09a17943520b96e8b52..2c62208077fe7a442aa1b5340e0e614d3b247c21 100644 (file)
@@ -81,6 +81,7 @@ static const char version[] =
 #include <linux/workqueue.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_gpio.h>
 
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -2188,6 +2189,41 @@ static const struct of_device_id smc91x_match[] = {
        {},
 };
 MODULE_DEVICE_TABLE(of, smc91x_match);
+
+/**
+ * try_toggle_control_gpio - configure a gpio if it exists
+ * try_toggle_control_gpio - configure a gpio if it exists
+ */
+static int try_toggle_control_gpio(struct device *dev,
+                                  struct gpio_desc **desc,
+                                  const char *name, int index,
+                                  int value, unsigned int nsdelay)
+{
+       struct gpio_desc *gpio = *desc;
+       int res;
+
+       gpio = devm_gpiod_get_index(dev, name, index);
+       if (IS_ERR(gpio)) {
+               if (PTR_ERR(gpio) == -ENOENT) {
+                       *desc = NULL;
+                       return 0;
+               }
+
+               return PTR_ERR(gpio);
+       }
+       res = gpiod_direction_output(gpio, !value);
+       if (res) {
+               dev_err(dev, "unable to toggle gpio %s: %i\n", name, res);
+               devm_gpiod_put(dev, gpio);
+               gpio = NULL;
+               return res;
+       }
+       if (nsdelay)
+               usleep_range(nsdelay, 2 * nsdelay);
+       gpiod_set_value_cansleep(gpio, value);
+       *desc = gpio;
+
+       return 0;
+}
 #endif
 
 /*
@@ -2237,6 +2273,28 @@ static int smc_drv_probe(struct platform_device *pdev)
                struct device_node *np = pdev->dev.of_node;
                u32 val;
 
+               /* Optional pwrdwn GPIO configured? */
+               ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio,
+                                             "power", 0, 0, 100);
+               if (ret)
+                       return ret;
+
+               /*
+                * Optional reset GPIO configured? Minimum 100 ns reset needed
+                * according to LAN91C96 datasheet page 14.
+                */
+               ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio,
+                                             "reset", 0, 0, 100);
+               if (ret)
+                       return ret;
+
+               /*
+                * Need to wait for optional EEPROM to load, max 750 us according
+                * to LAN91C96 datasheet page 55.
+                */
+               if (lp->reset_gpio)
+                       usleep_range(750, 1000);
+
                /* Combination of IO widths supported, default to 16-bit */
                if (!of_property_read_u32(np, "reg-io-width", &val)) {
                        if (val & 1)
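
In the smc91x probe hunks above, try_toggle_control_gpio() treats -ENOENT from devm_gpiod_get_index() as "this board simply does not describe such a GPIO" and only fails the probe on real errors (including -EPROBE_DEFER, which has to be propagated). The optional-resource shape of it, reduced to essentials with the same 3.18-era gpiod calls (function and variable names here are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* *out is NULL and 0 is returned when the GPIO is simply absent;
 * a valid descriptor and 0 when it exists; a negative errno otherwise. */
static int get_optional_output_gpio(struct device *dev, const char *name,
                                    int init_value, struct gpio_desc **out)
{
        struct gpio_desc *gpio = devm_gpiod_get_index(dev, name, 0);
        int ret;

        if (IS_ERR(gpio)) {
                if (PTR_ERR(gpio) == -ENOENT) { /* optional: absence is fine */
                        *out = NULL;
                        return 0;
                }
                return PTR_ERR(gpio);           /* -EPROBE_DEFER or a real failure */
        }

        ret = gpiod_direction_output(gpio, init_value);
        if (ret) {
                devm_gpiod_put(dev, gpio);
                return ret;
        }

        *out = gpio;
        return 0;
}
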
index 47dce918eb0f49741a9dec7ae3cee68354fb7a0a..2a38dacbbd27fba4f153790117cb10102dadd6e6 100644 (file)
@@ -298,6 +298,9 @@ struct smc_local {
        struct sk_buff *pending_tx_skb;
        struct tasklet_struct tx_task;
 
+       struct gpio_desc *power_gpio;
+       struct gpio_desc *reset_gpio;
+
        /* version/revision of the SMC91x chip */
        int     version;
 
index 655a23bbc45113595be3c049a05003382a6de049..e17a970eaf2b87a1ff184270835102a85fa5f1f4 100644 (file)
@@ -33,6 +33,7 @@ static struct stmmac_dma_cfg dma_cfg;
 static void stmmac_default_data(void)
 {
        memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
+
        plat_dat.bus_id = 1;
        plat_dat.phy_addr = 0;
        plat_dat.interface = PHY_INTERFACE_MODE_GMII;
@@ -47,6 +48,12 @@ static void stmmac_default_data(void)
        dma_cfg.pbl = 32;
        dma_cfg.burst_len = DMA_AXI_BLEN_256;
        plat_dat.dma_cfg = &dma_cfg;
+
+       /* Set default value for multicast hash bins */
+       plat_dat.multicast_filter_bins = HASH_TABLE_SIZE;
+
+       /* Set default value for unicast filter entries */
+       plat_dat.unicast_filter_entries = 1;
 }
 
 /**
index 952e1e4764b74d64688875815b25dc90316f3214..d8794488f80a78cbc4171c7ac57d942eb36ab521 100644 (file)
@@ -591,8 +591,8 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
                if (enable) {
                        unsigned long timeout = jiffies + HZ;
 
-                       /* Disable Learn for all ports */
-                       for (i = 0; i < priv->data.slaves; i++) {
+                       /* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */
+                       for (i = 0; i <= priv->data.slaves; i++) {
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NOLEARN, 1);
                                cpsw_ale_control_set(ale, i,
@@ -616,11 +616,11 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
                        cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
                        dev_dbg(&ndev->dev, "promiscuity enabled\n");
                } else {
-                       /* Flood All Unicast Packets to Host port */
+                       /* Don't Flood All Unicast Packets to Host port */
                        cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
 
-                       /* Enable Learn for all ports */
-                       for (i = 0; i < priv->data.slaves; i++) {
+                       /* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */
+                       for (i = 0; i <= priv->data.slaves; i++) {
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NOLEARN, 0);
                                cpsw_ale_control_set(ale, i,
@@ -638,12 +638,16 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
        if (ndev->flags & IFF_PROMISC) {
                /* Enable promiscuous mode */
                cpsw_set_promiscious(ndev, true);
+               cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI);
                return;
        } else {
                /* Disable promiscuous mode */
                cpsw_set_promiscious(ndev, false);
        }
 
+       /* Restore allmulti on vlans if necessary */
+       cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
+
        /* Clear all mcast from ALE */
        cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
 
@@ -1149,6 +1153,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
        const int port = priv->host_port;
        u32 reg;
        int i;
+       int unreg_mcast_mask;
 
        reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
               CPSW2_PORT_VLAN;
@@ -1158,9 +1163,14 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
        for (i = 0; i < priv->data.slaves; i++)
                slave_write(priv->slaves + i, vlan, reg);
 
+       if (priv->ndev->flags & IFF_ALLMULTI)
+               unreg_mcast_mask = ALE_ALL_PORTS;
+       else
+               unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+
        cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
                          ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
-                         (ALE_PORT_1 | ALE_PORT_2) << port);
+                         unreg_mcast_mask << port);
 }
 
 static void cpsw_init_host_port(struct cpsw_priv *priv)
@@ -1620,11 +1630,17 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
                                unsigned short vid)
 {
        int ret;
+       int unreg_mcast_mask;
+
+       if (priv->ndev->flags & IFF_ALLMULTI)
+               unreg_mcast_mask = ALE_ALL_PORTS;
+       else
+               unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
 
        ret = cpsw_ale_add_vlan(priv->ale, vid,
                                ALE_ALL_PORTS << priv->host_port,
                                0, ALE_ALL_PORTS << priv->host_port,
-                               (ALE_PORT_1 | ALE_PORT_2) << priv->host_port);
+                               unreg_mcast_mask << priv->host_port);
        if (ret != 0)
                return ret;
 
@@ -2006,7 +2022,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                parp = of_get_property(slave_node, "phy_id", &lenp);
                if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
                        dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
-                       return -EINVAL;
+                       goto no_phy_slave;
                }
                mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
                phyid = be32_to_cpup(parp+1);
@@ -2019,6 +2035,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
                         PHY_ID_FMT, mdio->name, phyid);
 
+               slave_data->phy_if = of_get_phy_mode(slave_node);
+               if (slave_data->phy_if < 0) {
+                       dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
+                               i);
+                       return slave_data->phy_if;
+               }
+
+no_phy_slave:
                mac_addr = of_get_mac_address(slave_node);
                if (mac_addr) {
                        memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
@@ -2030,14 +2054,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                                        return ret;
                        }
                }
-
-               slave_data->phy_if = of_get_phy_mode(slave_node);
-               if (slave_data->phy_if < 0) {
-                       dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
-                               i);
-                       return slave_data->phy_if;
-               }
-
                if (data->dual_emac) {
                        if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
                                                 &prop)) {
index 0579b2243bb6d7fdfb20e27b761fbdf1d33b00b6..3ae83879a75f5eeb0c44cafcf362a661f1cb86ac 100644 (file)
@@ -443,6 +443,35 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
        return 0;
 }
 
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
+{
+       u32 ale_entry[ALE_ENTRY_WORDS];
+       int type, idx;
+       int unreg_mcast = 0;
+
+       /* Only bother doing the work if the setting is actually changing */
+       if (ale->allmulti == allmulti)
+               return;
+
+       /* Remember the new setting to check against next time */
+       ale->allmulti = allmulti;
+
+       for (idx = 0; idx < ale->params.ale_entries; idx++) {
+               cpsw_ale_read(ale, idx, ale_entry);
+               type = cpsw_ale_get_entry_type(ale_entry);
+               if (type != ALE_TYPE_VLAN)
+                       continue;
+
+               unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry);
+               if (allmulti)
+                       unreg_mcast |= 1;
+               else
+                       unreg_mcast &= ~1;
+               cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
+               cpsw_ale_write(ale, idx, ale_entry);
+       }
+}
+
 struct ale_control_info {
        const char      *name;
        int             offset, port_offset;
index 31cf43cab42ee7ba70b6948593bc93cf28c5a6e7..c0d4127aa549285c7e50e47214c1579e17478210 100644 (file)
@@ -27,6 +27,7 @@ struct cpsw_ale {
        struct cpsw_ale_params  params;
        struct timer_list       timer;
        unsigned long           ageout;
+       int                     allmulti;
 };
 
 enum cpsw_ale_control {
@@ -103,6 +104,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
 int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
                        int reg_mcast, int unreg_mcast);
 int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti);
 
 int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
 int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
index 9e17d1a91e7193fb3cf566b468abdd4386889495..78ec33f5100b183e31bfc949763acfa4685b9836 100644 (file)
@@ -550,6 +550,7 @@ do_lso:
 do_send:
        /* Start filling in the page buffers with the rndis hdr */
        rndis_msg->msg_len += rndis_msg_size;
+       packet->total_data_buflen = rndis_msg->msg_len;
        packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
                                        skb, &packet->page_buf[0]);
 
index 29b3bb410781c2530a41ae689b8a91d51e1de3ef..bfb0b6ec8c56e26d67fa94814fe460af34c95fed 100644 (file)
@@ -272,7 +272,7 @@ static void macvlan_process_broadcast(struct work_struct *w)
        struct sk_buff *skb;
        struct sk_buff_head list;
 
-       skb_queue_head_init(&list);
+       __skb_queue_head_init(&list);
 
        spin_lock_bh(&port->bc_queue.lock);
        skb_queue_splice_tail_init(&port->bc_queue, &list);
@@ -1082,9 +1082,15 @@ static void macvlan_port_destroy(struct net_device *dev)
 {
        struct macvlan_port *port = macvlan_port_get_rtnl(dev);
 
-       cancel_work_sync(&port->bc_work);
        dev->priv_flags &= ~IFF_MACVLAN_PORT;
        netdev_rx_handler_unregister(dev);
+
+       /* After this point, no packet can schedule bc_work anymore,
+        * but we need to cancel it and purge left skbs if any.
+        */
+       cancel_work_sync(&port->bc_work);
+       __skb_queue_purge(&port->bc_queue);
+
        kfree_rcu(port, rcu);
 }
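
The macvlan change is an ordering fix for port teardown: unhook the rx handler first so nothing can queue new skbs on bc_queue or schedule bc_work, then cancel the work, and only then purge whatever is left; once that ordering holds, the unlocked __skb_queue_head_init()/__skb_queue_purge() pair is safe because no concurrent user remains. The shape of it (struct and field names follow the hunk, the rest is illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct my_port {
        struct sk_buff_head     bc_queue;
        struct work_struct      bc_work;
};

static void my_port_destroy(struct net_device *dev, struct my_port *port)
{
        /* 1. stop the producer: no new skbs, no new work scheduling */
        netdev_rx_handler_unregister(dev);

        /* 2. wait for a possibly pending or running worker to finish */
        cancel_work_sync(&port->bc_work);

        /* 3. nobody can race with us any more: unlocked purge is fine */
        __skb_queue_purge(&port->bc_queue);
}
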
 
index 65e2892342bd0cdf31a6c790e3344dc1a4f12816..6f226de655a40759874356c38352b928d1e425f3 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/idr.h>
 #include <linux/fs.h>
 
+#include <net/ipv6.h>
 #include <net/net_namespace.h>
 #include <net/rtnetlink.h>
 #include <net/sock.h>
@@ -65,7 +66,7 @@ static struct cdev macvtap_cdev;
 static const struct proto_ops macvtap_socket_ops;
 
 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
-                     NETIF_F_TSO6 | NETIF_F_UFO)
+                     NETIF_F_TSO6)
 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
 #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
 
@@ -569,7 +570,11 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
                        gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
+                       pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
+                                    current->comm);
                        gso_type = SKB_GSO_UDP;
+                       if (skb->protocol == htons(ETH_P_IPV6))
+                               ipv6_proxy_select_ident(skb);
                        break;
                default:
                        return -EINVAL;
@@ -614,8 +619,6 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (sinfo->gso_type & SKB_GSO_TCPV6)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
-               else if (sinfo->gso_type & SKB_GSO_UDP)
-                       vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (sinfo->gso_type & SKB_GSO_TCP_ECN)
@@ -950,9 +953,6 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
                        if (arg & TUN_F_TSO6)
                                feature_mask |= NETIF_F_TSO6;
                }
-
-               if (arg & TUN_F_UFO)
-                       feature_mask |= NETIF_F_UFO;
        }
 
        /* tun/tap driver inverts the usage for TSO offloads, where
@@ -963,7 +963,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
         * When user space turns off TSO, we turn off GSO/LRO so that
         * user-space will not receive TSO frames.
         */
-       if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
+       if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
                features |= RX_OFFLOADS;
        else
                features &= ~RX_OFFLOADS;
@@ -1064,7 +1064,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
        case TUNSETOFFLOAD:
                /* let the user check for future flags */
                if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
-                           TUN_F_TSO_ECN | TUN_F_UFO))
+                           TUN_F_TSO_ECN))
                        return -EINVAL;
 
                rtnl_lock();
index bd37e45c89c0e13f8128c82ac284c4ba6530d52d..225c033b08f3fbc4217fc238ec7048b611b8d87b 100644 (file)
 #define MII_M1011_PHY_SCR              0x10
 #define MII_M1011_PHY_SCR_AUTO_CROSS   0x0060
 
+#define MII_M1145_PHY_EXT_SR           0x1b
 #define MII_M1145_PHY_EXT_CR           0x14
 #define MII_M1145_RGMII_RX_DELAY       0x0080
 #define MII_M1145_RGMII_TX_DELAY       0x0002
 
+#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK      0x4
+#define MII_M1145_HWCFG_MODE_MASK              0xf
+#define MII_M1145_HWCFG_FIBER_COPPER_AUTO      0x8000
+
 #define MII_M1111_PHY_LED_CONTROL      0x18
 #define MII_M1111_PHY_LED_DIRECT       0x4100
 #define MII_M1111_PHY_LED_COMBINE      0x411c
@@ -676,6 +681,20 @@ static int m88e1145_config_init(struct phy_device *phydev)
                }
        }
 
+       if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+               int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
+               if (temp < 0)
+                       return temp;
+
+               temp &= ~MII_M1145_HWCFG_MODE_MASK;
+               temp |= MII_M1145_HWCFG_MODE_SGMII_NO_CLK;
+               temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO;
+
+               err = phy_write(phydev, MII_M1145_PHY_EXT_SR, temp);
+               if (err < 0)
+                       return err;
+       }
+
        err = marvell_of_reg_init(phydev);
        if (err < 0)
                return err;
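
The m88e1145 SGMII hunk is a standard MDIO read-modify-write: read the extended status register, clear the HWCFG mode field, set SGMII-without-clock plus fiber/copper auto-select, write it back, and propagate an error from either access. The same sequence in isolation (register and bit names below are placeholders, not Marvell definitions):

#include <linux/phy.h>

#define MY_EXT_REG              0x1b
#define MY_MODE_MASK            0x000f
#define MY_MODE_SGMII_NO_CLK    0x0004

static int my_phy_set_sgmii_mode(struct phy_device *phydev)
{
        int val = phy_read(phydev, MY_EXT_REG);

        if (val < 0)
                return val;             /* MDIO read failed */

        val &= ~MY_MODE_MASK;           /* clear the mode field */
        val |= MY_MODE_SGMII_NO_CLK;    /* select SGMII without clock */

        return phy_write(phydev, MY_EXT_REG, val);
}
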
index 186ce541c65762f8ee1720aae7f573f145982406..7302398f0b1fff89ffe4a4e373e936c928180645 100644 (file)
@@ -65,6 +65,7 @@
 #include <linux/nsproxy.h>
 #include <linux/virtio_net.h>
 #include <linux/rcupdate.h>
+#include <net/ipv6.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
@@ -174,7 +175,7 @@ struct tun_struct {
        struct net_device       *dev;
        netdev_features_t       set_features;
 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
-                         NETIF_F_TSO6|NETIF_F_UFO)
+                         NETIF_F_TSO6)
 
        int                     vnet_hdr_sz;
        int                     sndbuf;
@@ -1139,6 +1140,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                break;
        }
 
+       skb_reset_network_header(skb);
+
        if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -1149,8 +1152,20 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
+               {
+                       static bool warned;
+
+                       if (!warned) {
+                               warned = true;
+                               netdev_warn(tun->dev,
+                                           "%s: using disabled UFO feature; please fix this program\n",
+                                           current->comm);
+                       }
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+                       if (skb->protocol == htons(ETH_P_IPV6))
+                               ipv6_proxy_select_ident(skb);
                        break;
+               }
                default:
                        tun->dev->stats.rx_frame_errors++;
                        kfree_skb(skb);
@@ -1179,7 +1194,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        }
 
-       skb_reset_network_header(skb);
        skb_probe_transport_header(skb, 0);
 
        rxhash = skb_get_hash(skb);
@@ -1251,8 +1265,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                        else if (sinfo->gso_type & SKB_GSO_TCPV6)
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
-                       else if (sinfo->gso_type & SKB_GSO_UDP)
-                               gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                        else {
                                pr_err("unexpected GSO type: "
                                       "0x%x, gso_size %d, hdr_len %d\n",
@@ -1762,11 +1774,6 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
                                features |= NETIF_F_TSO6;
                        arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
                }
-
-               if (arg & TUN_F_UFO) {
-                       features |= NETIF_F_UFO;
-                       arg &= ~TUN_F_UFO;
-               }
        }
 
        /* This gives the user a way to test for new features in future by
index be4275721039ad3b01cb1eb44cbca9d85a606597..e6338c16081a5d66c11ad400e876ca7b6c7bcd40 100644 (file)
@@ -937,6 +937,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
 {
        struct usbnet *dev = netdev_priv(net);
        struct sockaddr *addr = p;
+       int ret;
 
        if (netif_running(net))
                return -EBUSY;
@@ -946,8 +947,12 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
        memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
 
        /* Set the MAC address */
-       return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
+       ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
                                 ETH_ALEN, net->dev_addr);
+       if (ret < 0)
+               return ret;
+
+       return 0;
 }
 
 static const struct net_device_ops ax88179_netdev_ops = {
index 2a32d9167d3b931fb82c434bb0198e27bfc1d8ae..d3920b54a92ce3de23a581d3cf80e99aa7d61e72 100644 (file)
@@ -67,6 +67,35 @@ static const u8 mbm_guid[16] = {
        0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
 };
 
+static void usbnet_cdc_update_filter(struct usbnet *dev)
+{
+       struct cdc_state        *info = (void *) &dev->data;
+       struct usb_interface    *intf = info->control;
+
+       u16 cdc_filter =
+           USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED |
+           USB_CDC_PACKET_TYPE_BROADCAST;
+
+       if (dev->net->flags & IFF_PROMISC)
+               cdc_filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS;
+
+       /* FIXME cdc-ether has some multicast code too, though it complains
+        * in routine cases.  info->ether describes the multicast support.
+        * Implement that here, manipulating the cdc filter as needed.
+        */
+
+       usb_control_msg(dev->udev,
+                       usb_sndctrlpipe(dev->udev, 0),
+                       USB_CDC_SET_ETHERNET_PACKET_FILTER,
+                       USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+                       cdc_filter,
+                       intf->cur_altsetting->desc.bInterfaceNumber,
+                       NULL,
+                       0,
+                       USB_CTRL_SET_TIMEOUT
+               );
+}
+
 /* probes control interface, claims data interface, collects the bulk
  * endpoints, activates data interface (if needed), maybe sets MTU.
  * all pure cdc, except for certain firmware workarounds, and knowing
@@ -347,16 +376,8 @@ next_desc:
         * don't do reset all the way. So the packet filter should
         * be set to a sane initial value.
         */
-       usb_control_msg(dev->udev,
-                       usb_sndctrlpipe(dev->udev, 0),
-                       USB_CDC_SET_ETHERNET_PACKET_FILTER,
-                       USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-                       USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
-                       intf->cur_altsetting->desc.bInterfaceNumber,
-                       NULL,
-                       0,
-                       USB_CTRL_SET_TIMEOUT
-               );
+       usbnet_cdc_update_filter(dev);
+
        return 0;
 
 bad_desc:
@@ -468,10 +489,6 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
                return status;
        }
 
-       /* FIXME cdc-ether has some multicast code too, though it complains
-        * in routine cases.  info->ether describes the multicast support.
-        * Implement that here, manipulating the cdc filter as needed.
-        */
        return 0;
 }
 EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
@@ -482,6 +499,7 @@ static const struct driver_info     cdc_info = {
        .bind =         usbnet_cdc_bind,
        .unbind =       usbnet_cdc_unbind,
        .status =       usbnet_cdc_status,
+       .set_rx_mode =  usbnet_cdc_update_filter,
        .manage_power = usbnet_manage_power,
 };
 
@@ -491,6 +509,7 @@ static const struct driver_info wwan_info = {
        .bind =         usbnet_cdc_bind,
        .unbind =       usbnet_cdc_unbind,
        .status =       usbnet_cdc_status,
+       .set_rx_mode =  usbnet_cdc_update_filter,
        .manage_power = usbnet_manage_power,
 };
 
index e3d84c322e4ec2589d496ad9a6355bd14e08da31..c6554c7a8147045450ec8385797038f1ceba955f 100644 (file)
@@ -1162,6 +1162,9 @@ static void intr_callback(struct urb *urb)
        case -ESHUTDOWN:
                netif_device_detach(tp->netdev);
        case -ENOENT:
+       case -EPROTO:
+               netif_info(tp, intr, tp->netdev,
+                          "Stop submitting intr, status %d\n", status);
                return;
        case -EOVERFLOW:
                netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
@@ -2891,6 +2894,9 @@ static int rtl8152_open(struct net_device *netdev)
        if (res)
                goto out;
 
+       /* set speed to 0 to avoid an autoresume trying to submit rx */
+       tp->speed = 0;
+
        res = usb_autopm_get_interface(tp->intf);
        if (res < 0) {
                free_all_mem(tp);
@@ -2904,6 +2910,8 @@ static int rtl8152_open(struct net_device *netdev)
                clear_bit(WORK_ENABLE, &tp->flags);
                usb_kill_urb(tp->intr_urb);
                cancel_delayed_work_sync(&tp->schedule);
+
+               /* disable the tx/rx, if the workqueue has enabled them. */
                if (tp->speed & LINK_STATUS)
                        tp->rtl_ops.disable(tp);
        }
@@ -2955,10 +2963,7 @@ static int rtl8152_close(struct net_device *netdev)
                 * be disable when autoresume occurs, because the
                 * netif_running() would be false.
                 */
-               if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
-                       rtl_runtime_suspend_enable(tp, false);
-                       clear_bit(SELECTIVE_SUSPEND, &tp->flags);
-               }
+               rtl_runtime_suspend_enable(tp, false);
 
                tasklet_disable(&tp->tl);
                tp->rtl_ops.down(tp);
@@ -3205,7 +3210,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
                netif_device_detach(netdev);
        }
 
-       if (netif_running(netdev)) {
+       if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
                clear_bit(WORK_ENABLE, &tp->flags);
                usb_kill_urb(tp->intr_urb);
                tasklet_disable(&tp->tl);
@@ -3253,6 +3258,8 @@ static int rtl8152_resume(struct usb_interface *intf)
                        set_bit(WORK_ENABLE, &tp->flags);
                }
                usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+       } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+               clear_bit(SELECTIVE_SUSPEND, &tp->flags);
        }
 
        mutex_unlock(&tp->control);
index 20615bbd693b2eb782f8a6bba08adaff3cfa724a..3a6770a65d7836ace177cff2bb45c925b474a84a 100644 (file)
@@ -1052,6 +1052,21 @@ static void __handle_link_change(struct usbnet *dev)
        clear_bit(EVENT_LINK_CHANGE, &dev->flags);
 }
 
+static void usbnet_set_rx_mode(struct net_device *net)
+{
+       struct usbnet           *dev = netdev_priv(net);
+
+       usbnet_defer_kevent(dev, EVENT_SET_RX_MODE);
+}
+
+static void __handle_set_rx_mode(struct usbnet *dev)
+{
+       if (dev->driver_info->set_rx_mode)
+               (dev->driver_info->set_rx_mode)(dev);
+
+       clear_bit(EVENT_SET_RX_MODE, &dev->flags);
+}
+
 /* work that cannot be done in interrupt context uses keventd.
  *
  * NOTE:  with 2.5 we could do more of this using completion callbacks,
@@ -1157,6 +1172,10 @@ skip_reset:
        if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
                __handle_link_change(dev);
 
+       if (test_bit (EVENT_SET_RX_MODE, &dev->flags))
+               __handle_set_rx_mode(dev);
+
+
        if (dev->flags)
                netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
 }
@@ -1525,6 +1544,7 @@ static const struct net_device_ops usbnet_netdev_ops = {
        .ndo_stop               = usbnet_stop,
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
+       .ndo_set_rx_mode        = usbnet_set_rx_mode,
        .ndo_change_mtu         = usbnet_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
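
The usbnet plumbing above exists because ndo_set_rx_mode() is invoked in atomic context with the address-list lock held, while reprogramming a USB device's packet filter needs a blocking usb_control_msg(); the callback therefore only records the request and defers the real work to the kevent worker. The general shape of that deferral, stripped of usbnet specifics (names illustrative):

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

#define EV_RX_MODE      0

struct my_dev {
        struct net_device       *net;
        unsigned long           events;
        struct work_struct      kevent;
};

/* atomic context: just record the request and kick the worker */
static void my_set_rx_mode(struct net_device *net)
{
        struct my_dev *dev = netdev_priv(net);

        set_bit(EV_RX_MODE, &dev->events);
        schedule_work(&dev->kevent);
}

/* process context: safe to sleep, e.g. to issue USB control transfers */
static void my_kevent(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, kevent);

        if (test_and_clear_bit(EV_RX_MODE, &dev->events))
                ;       /* reprogram the device's packet filter here */
}
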
index d75256bd1a6ab8bb6284ad4088e9513469fffe8a..ec2a8b41ed41a1484c697f1897e0286f5d5d845a 100644 (file)
@@ -491,8 +491,17 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
+               {
+                       static bool warned;
+
+                       if (!warned) {
+                               warned = true;
+                               netdev_warn(dev,
+                                           "host using disabled UFO feature; please fix it\n");
+                       }
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
+               }
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
@@ -881,8 +890,6 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
-               else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
-                       hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
@@ -1705,7 +1712,7 @@ static int virtnet_probe(struct virtio_device *vdev)
                        dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
 
                if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
-                       dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
+                       dev->hw_features |= NETIF_F_TSO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
@@ -1715,11 +1722,9 @@ static int virtnet_probe(struct virtio_device *vdev)
                        dev->hw_features |= NETIF_F_TSO6;
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->hw_features |= NETIF_F_TSO_ECN;
-               if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
-                       dev->hw_features |= NETIF_F_UFO;
 
                if (gso)
-                       dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
+                       dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
                /* (!csum && gso) case will be fixed by register_netdev() */
        }
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -1757,8 +1762,7 @@ static int virtnet_probe(struct virtio_device *vdev)
        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
-           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
-           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
+           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
                vi->big_packets = true;
 
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -1952,9 +1956,9 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
-       VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
+       VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
-       VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
+       VIRTIO_NET_F_GUEST_ECN,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
        VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
index e5ba6faf32815173fa47fa81fd303082688202bd..86907e5ba6ca21a5e7e6d8ff8b6b20499e004cce 100644 (file)
@@ -80,6 +80,7 @@ struct reg_dmn_pair_mapping {
 
 struct ath_regulatory {
        char alpha2[2];
+       enum nl80211_dfs_regions region;
        u16 country_code;
        u16 max_power_level;
        u16 current_rd;
index c6dd7f1fed65ed52b2ed1d0fd72c474260b5656f..33b0c7aef2ea39f527962f483a8014a052ac10ce 100644 (file)
@@ -368,11 +368,11 @@ void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
 {
        struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
 
-       if (reg->power_limit != new_txpow) {
+       if (reg->power_limit != new_txpow)
                ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
-               /* read back in case value is clamped */
-               *txpower = reg->max_power_level;
-       }
+
+       /* read back in case value is clamped */
+       *txpower = reg->max_power_level;
 }
 EXPORT_SYMBOL(ath9k_cmn_update_txpow);
 
index 46f20a309b5f53c10e6d487c999d560de4f5f6e3..5c45e787814ed6a9b65f9f3e523626f282201ddb 100644 (file)
@@ -455,7 +455,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
                         "%2d          %2x      %1x     %2x           %2x\n",
                         i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
                         (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
-                        val[2] & (0x7 << (i * 3)) >> (i * 3),
+                        (val[2] & (0x7 << (i * 3))) >> (i * 3),
                         (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
        }
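
The one-character ath9k debugfs change is a precedence fix: shifts bind tighter than '&', so the unparenthesized form computed val[2] & ((0x7 << (i * 3)) >> (i * 3)), i.e. val[2] & 0x7 for every i, always reporting the lowest 3-bit group. A runnable illustration:

#include <stdio.h>

int main(void)
{
        unsigned int val = 0x123;       /* 3-bit groups, low to high: 3, 4, 4 */
        int i = 1;                      /* extract the middle group, expect 4 */

        /* shifts bind tighter than '&': this is val & ((0x7 << 3) >> 3) == val & 0x7 */
        unsigned int buggy = val & (0x7 << (i * 3)) >> (i * 3);

        /* explicit parentheses mask first, then shift the group down to bit 0 */
        unsigned int fixed = (val & (0x7 << (i * 3))) >> (i * 3);

        printf("buggy = %u, fixed = %u\n", buggy, fixed);       /* buggy = 3, fixed = 4 */
        return 0;
}
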
 
index 156a944134dcfb09ee139ccc2ffde1af306bb936..3bd030494986a087bfd9744873649442fa67b572 100644 (file)
@@ -734,6 +734,32 @@ static const struct ieee80211_iface_combination if_comb[] = {
 #endif
 };
 
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+static void ath9k_set_mcc_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!ath9k_is_chanctx_enabled())
+               return;
+
+       hw->flags |= IEEE80211_HW_QUEUE_CONTROL;
+       hw->queues = ATH9K_NUM_TX_QUEUES;
+       hw->offchannel_tx_hw_queue = hw->queues - 1;
+       hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS);
+       hw->wiphy->iface_combinations = if_comb_multi;
+       hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi);
+       hw->wiphy->max_scan_ssids = 255;
+       hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+       hw->wiphy->max_remain_on_channel_duration = 10000;
+       hw->chanctx_data_size = sizeof(void *);
+       hw->extra_beacon_tailroom =
+               sizeof(struct ieee80211_p2p_noa_attr) + 9;
+
+       ath_dbg(common, CHAN_CTX, "Use channel contexts\n");
+}
+#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
+
 static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 {
        struct ath_hw *ah = sc->sc_ah;
@@ -746,7 +772,6 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
                IEEE80211_HW_SPECTRUM_MGMT |
                IEEE80211_HW_REPORTS_TX_ACK_STATUS |
                IEEE80211_HW_SUPPORTS_RC_TABLE |
-               IEEE80211_HW_QUEUE_CONTROL |
                IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
 
        if (ath9k_ps_enable)
@@ -781,24 +806,6 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
                        hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
        }
 
-#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
-
-       if (ath9k_is_chanctx_enabled()) {
-               hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS);
-               hw->wiphy->iface_combinations = if_comb_multi;
-               hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi);
-               hw->wiphy->max_scan_ssids = 255;
-               hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
-               hw->wiphy->max_remain_on_channel_duration = 10000;
-               hw->chanctx_data_size = sizeof(void *);
-               hw->extra_beacon_tailroom =
-                       sizeof(struct ieee80211_p2p_noa_attr) + 9;
-
-               ath_dbg(common, CHAN_CTX, "Use channel contexts\n");
-       }
-
-#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
-
        hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -808,12 +815,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
        hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
        hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
 
-       /* allow 4 queues per channel context +
-        * 1 cab queue + 1 offchannel tx queue
-        */
-       hw->queues = ATH9K_NUM_TX_QUEUES;
-       /* last queue for offchannel */
-       hw->offchannel_tx_hw_queue = hw->queues - 1;
+       hw->queues = 4;
        hw->max_rates = 4;
        hw->max_listen_interval = 10;
        hw->max_rate_tries = 10;
@@ -837,6 +839,9 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &common->sbands[IEEE80211_BAND_5GHZ];
 
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+       ath9k_set_mcc_capab(sc, hw);
+#endif
        ath9k_init_wow(hw);
        ath9k_cmn_reload_chainmask(ah);
 
index 6f6a974f7fdb265960db2098f2c45b9684ffca2b..30c66dfcd7a04b8489186c82db741b87f0afbad0 100644 (file)
@@ -1162,6 +1162,9 @@ static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
 {
        int i;
 
+       if (!ath9k_is_chanctx_enabled())
+               return;
+
        for (i = 0; i < IEEE80211_NUM_ACS; i++)
                vif->hw_queue[i] = i;
 
index 493a183d0aaf7d04c77103a8c4ea2b2c477089ad..d6e54a3c88f671f08ae3174ddabf6721fb39e051 100644 (file)
@@ -169,7 +169,10 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
 
        if (txq->stopped &&
            txq->pending_frames < sc->tx.txq_max_pending[q]) {
-               ieee80211_wake_queue(sc->hw, info->hw_queue);
+               if (ath9k_is_chanctx_enabled())
+                       ieee80211_wake_queue(sc->hw, info->hw_queue);
+               else
+                       ieee80211_wake_queue(sc->hw, q);
                txq->stopped = false;
        }
 }
@@ -2247,7 +2250,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                fi->txq = q;
                if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
                    !txq->stopped) {
-                       ieee80211_stop_queue(sc->hw, info->hw_queue);
+                       if (ath9k_is_chanctx_enabled())
+                               ieee80211_stop_queue(sc->hw, info->hw_queue);
+                       else
+                               ieee80211_stop_queue(sc->hw, q);
                        txq->stopped = true;
                }
        }
index 415393dfb6fc17b51d33f4d24e70c6700620d4b5..06ea6cc9e30a5e07c379116a8e1ec0996e3f189e 100644 (file)
@@ -515,6 +515,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
        if (!request)
                return;
 
+       reg->region = request->dfs_region;
        switch (request->initiator) {
        case NL80211_REGDOM_SET_BY_CORE:
                /*
@@ -779,6 +780,19 @@ u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
                return SD_NO_CTL;
        }
 
+       if (ath_regd_get_eepromRD(reg) == CTRY_DEFAULT) {
+               switch (reg->region) {
+               case NL80211_DFS_FCC:
+                       return CTL_FCC;
+               case NL80211_DFS_ETSI:
+                       return CTL_ETSI;
+               case NL80211_DFS_JP:
+                       return CTL_MKK;
+               default:
+                       break;
+               }
+       }
+
        switch (band) {
        case IEEE80211_BAND_2GHZ:
                return reg->regpair->reg_2ghz_ctl;
index f55f625fd06b4aab850ffa16761714a5dff196f6..d20d4e6f391ae89706e422e2b817034ee030c0ae 100644 (file)
@@ -670,7 +670,6 @@ static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
                                  struct brcmf_sdio_dev *sdiodev)
 {
        int i;
-       uint fw_len, nv_len;
        char end;
 
        for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
@@ -684,25 +683,25 @@ static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
                return -ENODEV;
        }
 
-       fw_len = sizeof(sdiodev->fw_name) - 1;
-       nv_len = sizeof(sdiodev->nvram_name) - 1;
        /* check if firmware path is provided by module parameter */
        if (brcmf_firmware_path[0] != '\0') {
-               strncpy(sdiodev->fw_name, brcmf_firmware_path, fw_len);
-               strncpy(sdiodev->nvram_name, brcmf_firmware_path, nv_len);
-               fw_len -= strlen(sdiodev->fw_name);
-               nv_len -= strlen(sdiodev->nvram_name);
+               strlcpy(sdiodev->fw_name, brcmf_firmware_path,
+                       sizeof(sdiodev->fw_name));
+               strlcpy(sdiodev->nvram_name, brcmf_firmware_path,
+                       sizeof(sdiodev->nvram_name));
 
                end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
                if (end != '/') {
-                       strncat(sdiodev->fw_name, "/", fw_len);
-                       strncat(sdiodev->nvram_name, "/", nv_len);
-                       fw_len--;
-                       nv_len--;
+                       strlcat(sdiodev->fw_name, "/",
+                               sizeof(sdiodev->fw_name));
+                       strlcat(sdiodev->nvram_name, "/",
+                               sizeof(sdiodev->nvram_name));
                }
        }
-       strncat(sdiodev->fw_name, brcmf_fwname_data[i].bin, fw_len);
-       strncat(sdiodev->nvram_name, brcmf_fwname_data[i].nv, nv_len);
+       strlcat(sdiodev->fw_name, brcmf_fwname_data[i].bin,
+               sizeof(sdiodev->fw_name));
+       strlcat(sdiodev->nvram_name, brcmf_fwname_data[i].nv,
+               sizeof(sdiodev->nvram_name));
 
        return 0;
 }
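
The brcmfmac change replaces strncpy()/strncat() plus hand-maintained remaining-length counters with strlcpy()/strlcat(): the latter always NUL-terminate and take the full destination size, truncating if the string does not fit, which removes the easy-to-get-wrong bookkeeping. A small sketch of composing a firmware path that way (names and sizes are made up):

#include <linux/string.h>

/* Build "<dir>/<file>" into buf; the result is always NUL-terminated and
 * silently truncated to 'size' bytes if it does not fit. */
static void build_fw_path(char *buf, size_t size,
                          const char *dir, const char *file)
{
        strlcpy(buf, dir, size);
        if (buf[0] != '\0' && buf[strlen(buf) - 1] != '/')
                strlcat(buf, "/", size);
        strlcat(buf, file, size);
}
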
index 2364a3c09b9eb8037ba6237112c679749a3ac94c..cae692ff1013f9ee00b93861558c6144cfad4cc1 100644 (file)
@@ -1095,6 +1095,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                             u32 queues, bool drop)
 {
        struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+       u32 scd_queues;
 
        mutex_lock(&priv->mutex);
        IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -1108,18 +1109,19 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                goto done;
        }
 
-       /*
-        * mac80211 will not push any more frames for transmit
-        * until the flush is completed
-        */
-       if (drop) {
-               IWL_DEBUG_MAC80211(priv, "send flush command\n");
-               if (iwlagn_txfifo_flush(priv, 0)) {
-                       IWL_ERR(priv, "flush request fail\n");
-                       goto done;
-               }
+       scd_queues = BIT(priv->cfg->base_params->num_of_queues) - 1;
+       scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
+                       BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
+
+       if (vif)
+               scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
+
+       IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues);
+       if (iwlagn_txfifo_flush(priv, scd_queues)) {
+               IWL_ERR(priv, "flush request fail\n");
+               goto done;
        }
-       IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
+       IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
        iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
 done:
        mutex_unlock(&priv->mutex);
index e4351487ca7269aefe06d4bf2da3c2fedc1f34da..d2b7234b1c73e00f76ed17094d52e0e18431d81f 100644 (file)
@@ -82,7 +82,8 @@
 #define IWL8000_TX_POWER_VERSION       0xffff /* meaningless */
 
 #define IWL8000_FW_PRE "iwlwifi-8000"
-#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
+#define IWL8000_MODULE_FIRMWARE(api) \
+       IWL8000_FW_PRE "-" __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_8000         10
 #define DEFAULT_NVM_FILE_FAMILY_8000           "iwl_nvm_8000.bin"
index 9eb85249e89c46aa980ae22893b48af2150ee4df..d8fc548c0d6cd561993eebb6df185e3ccad0d6ac 100644 (file)
@@ -563,6 +563,7 @@ enum iwl_trans_state {
  *     Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
  * @pm_support: set to true in start_hw if link pm is supported
+ * @ltr_enabled: set to true if the LTR is enabled
  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
  *     The user should use iwl_trans_{alloc,free}_tx_cmd.
  * @dev_cmd_headroom: room needed for the transport's private use before the
@@ -589,6 +590,7 @@ struct iwl_trans {
        u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
 
        bool pm_support;
+       bool ltr_enabled;
 
        /* The following fields are internal only */
        struct kmem_cache *dev_cmd_pool;
index 8df2021f9856365b2bec8465bb4ddbb02e907586..da2ffb78519431a1b14e9b355280a3c7a3446ed6 100644 (file)
@@ -303,8 +303,8 @@ static const __le64 iwl_ci_mask[][3] = {
 };
 
 static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
-       cpu_to_le32(0x28412201),
-       cpu_to_le32(0x11118451),
+       cpu_to_le32(0x2e402280),
+       cpu_to_le32(0x7711a751),
 };
 
 struct corunning_block_luts {
index 585c0ab4a3ec5fc59ca8d2e84395c74b27b4fcd2..8a1d2f33d5b7da2768a7e5849e305b6386b0b480 100644 (file)
@@ -291,8 +291,8 @@ static const __le64 iwl_ci_mask[][3] = {
 };
 
 static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
-       cpu_to_le32(0x28412201),
-       cpu_to_le32(0x11118451),
+       cpu_to_le32(0x2e402280),
+       cpu_to_le32(0x7711a751),
 };
 
 struct corunning_block_luts {
index 27dd86395b39dd0f5800efe9c73a52294a2099ed..2fd8ad4633e0ee717096461398c96fc67181d288 100644 (file)
 
 /* Power Management Commands, Responses, Notifications */
 
+/**
+ * enum iwl_ltr_config_flags - masks for LTR config command flags
+ * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
+ * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
+ *     memory access
+ * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
+ *     reg change
+ * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
+ *     D0 to D3
+ * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
+ * @LTR_CFG_FLAG_SW_SET_LONG: fixed static long LTR register
+ * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
+ */
+enum iwl_ltr_config_flags {
+       LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
+       LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
+       LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
+       LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
+       LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
+       LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
+       LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
+};
+
+/**
+ * struct iwl_ltr_config_cmd - configures the LTR
+ * @flags: See %enum iwl_ltr_config_flags
+ */
+struct iwl_ltr_config_cmd {
+       __le32 flags;
+       __le32 static_long;
+       __le32 static_short;
+} __packed;
+
 /* Radio LP RX Energy Threshold measured in dBm */
 #define POWER_LPRX_RSSI_THRESHOLD      75
 #define POWER_LPRX_RSSI_THRESHOLD_MAX  94
 #define POWER_LPRX_RSSI_THRESHOLD_MIN  30
 
 /**
- * enum iwl_scan_flags - masks for power table command flags
+ * enum iwl_power_flags - masks for power table command flags
  * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
  *             receiver and transmitter. '0' - does not allow.
  * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
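
The new LTR command layout above is a three-word little-endian structure. A userspace analog follows, where uint32_t and htole32() (glibc <endian.h>) stand in for the kernel's __le32/cpu_to_le32 and the flag value mirrors LTR_CFG_FLAG_FEATURE_ENABLE:

#include <stdint.h>
#include <stdio.h>
#include <endian.h>

struct ltr_config_cmd {
        uint32_t flags;
        uint32_t static_long;
        uint32_t static_short;
} __attribute__((packed));

int main(void)
{
        struct ltr_config_cmd cmd = {
                .flags = htole32(1u << 0),      /* FEATURE_ENABLE bit */
        };

        printf("cmd size %zu bytes, flags 0x%08x\n",
               sizeof(cmd), (unsigned int)cmd.flags);
        return 0;
}
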
index 667a92274c87773c1d123b41911afb5e38d19405..c62575d86bcdbe5e75c724cacf8605e8569309e6 100644 (file)
@@ -157,6 +157,7 @@ enum {
        /* Power - legacy power table command */
        POWER_TABLE_CMD = 0x77,
        PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
+       LTR_CONFIG = 0xee,
 
        /* Thermal Throttling*/
        REPLY_THERMAL_MNG_BACKOFF = 0x7e,
index 23fd711a67e456055b50d171ec243a124ca3ce6c..e0d9f19650b07e5df8b56342ec5e6d1bef88194f 100644 (file)
@@ -480,6 +480,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        /* Initialize tx backoffs to the minimal possible */
        iwl_mvm_tt_tx_backoff(mvm, 0);
 
+       if (mvm->trans->ltr_enabled) {
+               struct iwl_ltr_config_cmd cmd = {
+                       .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
+               };
+
+               WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
+                                            sizeof(cmd), &cmd));
+       }
+
        ret = iwl_mvm_power_update_device(mvm);
        if (ret)
                goto error;
index c7a73c68bdabddcb2eb2536256e0a7736be06dc1..585fe5b7100fb8750bed29eda65d91121422497c 100644 (file)
@@ -526,7 +526,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
        }
 
        if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
-           !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+           !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
+           !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
                goto drop;
 
        /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
@@ -1734,6 +1735,13 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
        if (changes & BSS_CHANGED_BEACON &&
            iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
                IWL_WARN(mvm, "Failed updating beacon data\n");
+
+       if (changes & BSS_CHANGED_TXPOWER) {
+               IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
+                               bss_conf->txpower);
+               iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
+       }
+
 }
 
 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -2367,14 +2375,19 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
        /* Set the node address */
        memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
 
+       lockdep_assert_held(&mvm->mutex);
+
+       spin_lock_bh(&mvm->time_event_lock);
+
+       if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
+               spin_unlock_bh(&mvm->time_event_lock);
+               return -EIO;
+       }
+
        te_data->vif = vif;
        te_data->duration = duration;
        te_data->id = HOT_SPOT_CMD;
 
-       lockdep_assert_held(&mvm->mutex);
-
-       spin_lock_bh(&mvm->time_event_lock);
-       list_add_tail(&te_data->list, &mvm->time_event_list);
        spin_unlock_bh(&mvm->time_event_lock);
 
        /*
@@ -2430,22 +2443,23 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
        IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
                           duration, type);
 
+       mutex_lock(&mvm->mutex);
+
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
                /* Use aux roc framework (HS20) */
                ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
                                               vif, duration);
-               return ret;
+               goto out_unlock;
        case NL80211_IFTYPE_P2P_DEVICE:
                /* handle below */
                break;
        default:
                IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_unlock;
        }
 
-       mutex_lock(&mvm->mutex);
-
        for (i = 0; i < NUM_PHY_CTX; i++) {
                phy_ctxt = &mvm->phy_ctxts[i];
                if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
index 15aa298ee79c749b5e7e7e7401d92375a270c29e..48cb25a93591a44678fd051e390dc15c02078744 100644 (file)
@@ -336,6 +336,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(DTS_MEASUREMENT_NOTIFICATION),
        CMD(REPLY_THERMAL_MNG_BACKOFF),
        CMD(MAC_PM_POWER_TABLE),
+       CMD(LTR_CONFIG),
        CMD(BT_COEX_CI),
        CMD(BT_COEX_UPDATE_SW_BOOST),
        CMD(BT_COEX_UPDATE_CORUN_LUT),
index cb85e63c20aa340d70f2be0a3c4f48c3781e8182..b280d5d87127e87ea80f1eb36a477dad3dfcc6e2 100644 (file)
@@ -459,7 +459,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
                                basic_ssid ? 1 : 0);
 
        cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
-                                          TX_CMD_FLG_BT_DIS);
+                                          3 << TX_CMD_FLG_BT_PRIO_POS);
+
        cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
        cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
        cmd->tx_cmd.rate_n_flags =
index b7f9e61d14e276f5505060a0e1cc78e3f2d1af4c..6dfad230be5ed658f2cb3f295e71b43d724d293c 100644 (file)
@@ -305,8 +305,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
                te_data->running = false;
                te_data->vif = NULL;
                te_data->uid = 0;
+               te_data->id = TE_MAX;
        } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
-               set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
                set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
                te_data->running = true;
                ieee80211_ready_on_channel(mvm->hw); /* Start TE */
index 1cb793a498ac88a221cccb9e5efa31267e4cb028..c6a517c771df5045cd5d2cecc80b4b29848a6c9c 100644 (file)
@@ -175,14 +175,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
 
        /*
         * for data packets, rate info comes from the table inside the fw. This
-        * table is controlled by LINK_QUALITY commands. Exclude ctrl port
-        * frames like EAPOLs which should be treated as mgmt frames. This
-        * avoids them being sent initially in high rates which increases the
-        * chances for completion of the 4-Way handshake.
+        * table is controlled by LINK_QUALITY commands
         */
 
-       if (ieee80211_is_data(fc) && sta &&
-           !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) {
+       if (ieee80211_is_data(fc) && sta) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
                return;
index 1393bac0025c59de0b24b1453eb7932bebda1b22..3781b029e54a328f6304bfb6cf61d52c053cbce7 100644 (file)
@@ -174,6 +174,7 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u16 lctl;
+       u16 cap;
 
        /*
         * HW bug W/A for instability in PCIe bus L0S->L1 transition.
@@ -184,16 +185,17 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
         *    power savings, even without L1.
         */
        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
-       if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
-               /* L1-ASPM enabled; disable(!) L0S */
+       if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
                iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-               dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
-       } else {
-               /* L1-ASPM disabled; enable(!) L0S */
+       else
                iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-               dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
-       }
        trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
+
+       pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
+       trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
+       dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
+                (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
+                trans->ltr_enabled ? "En" : "Dis");
 }
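
The reworked apm_config above derives pm_support from the Link Control register and the new ltr_enabled flag from Device Control 2. A userspace sketch with hard-coded example register readings reproduces the same bit tests and log format:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKCTL_ASPM_L0S 0x0001
#define PCI_EXP_LNKCTL_ASPM_L1  0x0002
#define PCI_EXP_DEVCTL2_LTR_EN  0x0400

int main(void)
{
        uint16_t lctl = PCI_EXP_LNKCTL_ASPM_L1;         /* example reading */
        uint16_t cap  = PCI_EXP_DEVCTL2_LTR_EN;         /* example reading */
        bool pm_support  = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
        bool ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;

        printf("L1 %sabled - LTR %sabled\n",
               (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
               ltr_enabled ? "En" : "Dis");
        printf("pm_support=%d\n", pm_support);
        return 0;
}
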
 
 /*
@@ -428,7 +430,7 @@ static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
        ret = iwl_poll_bit(trans, CSR_RESET,
                           CSR_RESET_REG_FLAG_MASTER_DISABLED,
                           CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
-       if (ret)
+       if (ret < 0)
                IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
 
        IWL_DEBUG_INFO(trans, "stop master\n");
@@ -544,7 +546,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
                msleep(25);
        }
 
-       IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
+       IWL_ERR(trans, "Couldn't prepare the card\n");
 
        return ret;
 }
@@ -1043,7 +1045,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                           25000);
-       if (ret) {
+       if (ret < 0) {
                IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
                return ret;
        }
index 40057079ffb9537eae9549518be3e5df29f1a16b..5ef5a0eeba50eb682d30e5879f565ac48ad217e4 100644 (file)
@@ -196,6 +196,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
        mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
 
        del_timer_sync(&tbl->timer_context.timer);
+       tbl->timer_context.timer_is_set = false;
 
        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        list_del(&tbl->list);
@@ -297,6 +298,7 @@ mwifiex_flush_data(unsigned long context)
                (struct reorder_tmr_cnxt *) context;
        int start_win, seq_num;
 
+       ctx->timer_is_set = false;
        seq_num = mwifiex_11n_find_last_seq_num(ctx);
 
        if (seq_num < 0)
@@ -385,6 +387,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
 
        new_node->timer_context.ptr = new_node;
        new_node->timer_context.priv = priv;
+       new_node->timer_context.timer_is_set = false;
 
        init_timer(&new_node->timer_context.timer);
        new_node->timer_context.timer.function = mwifiex_flush_data;
@@ -399,6 +402,22 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
+static void
+mwifiex_11n_rxreorder_timer_restart(struct mwifiex_rx_reorder_tbl *tbl)
+{
+       u32 min_flush_time;
+
+       if (tbl->win_size >= MWIFIEX_BA_WIN_SIZE_32)
+               min_flush_time = MIN_FLUSH_TIMER_15_MS;
+       else
+               min_flush_time = MIN_FLUSH_TIMER_MS;
+
+       mod_timer(&tbl->timer_context.timer,
+                 jiffies + msecs_to_jiffies(min_flush_time * tbl->win_size));
+
+       tbl->timer_context.timer_is_set = true;
+}
+
 /*
  * This function prepares command for adding a BA request.
  *
@@ -523,31 +542,31 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
                                u8 *ta, u8 pkt_type, void *payload)
 {
        struct mwifiex_rx_reorder_tbl *tbl;
-       int start_win, end_win, win_size;
+       int prev_start_win, start_win, end_win, win_size;
        u16 pkt_index;
        bool init_window_shift = false;
+       int ret = 0;
 
        tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
        if (!tbl) {
                if (pkt_type != PKT_TYPE_BAR)
                        mwifiex_11n_dispatch_pkt(priv, payload);
-               return 0;
+               return ret;
        }
 
        if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
                mwifiex_11n_dispatch_pkt(priv, payload);
-               return 0;
+               return ret;
        }
 
        start_win = tbl->start_win;
+       prev_start_win = start_win;
        win_size = tbl->win_size;
        end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
        if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) {
                init_window_shift = true;
                tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT;
        }
-       mod_timer(&tbl->timer_context.timer,
-                 jiffies + msecs_to_jiffies(MIN_FLUSH_TIMER_MS * win_size));
 
        if (tbl->flags & RXREOR_FORCE_NO_DROP) {
                dev_dbg(priv->adapter->dev,
@@ -568,11 +587,14 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
                if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
                        if (seq_num >= ((start_win + TWOPOW11) &
                                        (MAX_TID_VALUE - 1)) &&
-                           seq_num < start_win)
-                               return -1;
+                           seq_num < start_win) {
+                               ret = -1;
+                               goto done;
+                       }
                } else if ((seq_num < start_win) ||
-                          (seq_num > (start_win + TWOPOW11))) {
-                       return -1;
+                          (seq_num >= (start_win + TWOPOW11))) {
+                       ret = -1;
+                       goto done;
                }
        }
 
@@ -601,8 +623,10 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
                else
                        pkt_index = (seq_num+MAX_TID_VALUE) - start_win;
 
-               if (tbl->rx_reorder_ptr[pkt_index])
-                       return -1;
+               if (tbl->rx_reorder_ptr[pkt_index]) {
+                       ret = -1;
+                       goto done;
+               }
 
                tbl->rx_reorder_ptr[pkt_index] = payload;
        }
@@ -613,7 +637,11 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
         */
        mwifiex_11n_scan_and_dispatch(priv, tbl);
 
-       return 0;
+done:
+       if (!tbl->timer_context.timer_is_set ||
+           prev_start_win != tbl->start_win)
+               mwifiex_11n_rxreorder_timer_restart(tbl);
+       return ret;
 }
 
 /*
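
The reorder changes above restart the flush timer only when it is not already armed or when the window has moved, and large block-ack windows get a shorter per-slot timeout so the total delay stays bounded. A standalone sketch of that timeout policy, reusing the values from the defines added in 11n_rxreorder.h:

#include <stdio.h>

#define MIN_FLUSH_TIMER_MS      50
#define MIN_FLUSH_TIMER_15_MS   15
#define BA_WIN_SIZE_32          32

static unsigned int flush_timeout_ms(unsigned int win_size)
{
        unsigned int per_slot = (win_size >= BA_WIN_SIZE_32) ?
                                MIN_FLUSH_TIMER_15_MS : MIN_FLUSH_TIMER_MS;

        return per_slot * win_size;     /* what mod_timer() would be given */
}

int main(void)
{
        printf("win 16 -> %u ms, win 64 -> %u ms\n",
               flush_timeout_ms(16), flush_timeout_ms(64));
        return 0;
}
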
index 3a87bb0e3a62adb477784a94feba13cefd32626b..63ecea89b4ab562cc00942b2542fac63ed8202e0 100644 (file)
@@ -21,6 +21,8 @@
 #define _MWIFIEX_11N_RXREORDER_H_
 
 #define MIN_FLUSH_TIMER_MS             50
+#define MIN_FLUSH_TIMER_15_MS          15
+#define MWIFIEX_BA_WIN_SIZE_32         32
 
 #define PKT_TYPE_BAR 0xE7
 #define MAX_TID_VALUE                  (2 << 11)
index e2635747d9669c21f30f08780a2024c70af2245e..f55658d15c60710d44603d12b9fbab58414050fc 100644 (file)
@@ -592,6 +592,7 @@ struct reorder_tmr_cnxt {
        struct timer_list timer;
        struct mwifiex_rx_reorder_tbl *ptr;
        struct mwifiex_private *priv;
+       u8 timer_is_set;
 };
 
 struct mwifiex_rx_reorder_tbl {
index 573897b8e878a2e6bfc3aa61a9df72d9b99e0372..8444313eabe2bbc1bdcb660c5a82bff17f3fdb90 100644 (file)
@@ -1111,6 +1111,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        /* Ovislink */
        { USB_DEVICE(0x1b75, 0x3071) },
        { USB_DEVICE(0x1b75, 0x3072) },
+       { USB_DEVICE(0x1b75, 0xa200) },
        /* Para */
        { USB_DEVICE(0x20b8, 0x8888) },
        /* Pegatron */
index 58ba718308862391b108a41e0eab1e7ff7fa226a..40b6d1d006d7ab7dc9076a967fedae75259dc1fd 100644 (file)
@@ -467,7 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
                    rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw);
        /* <2> work queue */
        rtlpriv->works.hw = hw;
-       rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0);
+       rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
        INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
                          (void *)rtl_watchdog_wq_callback);
        INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
index f6179bc060869cfdb6dd6d9d9c90d8cc43197904..07dae0d44abc09a78025d71e32570dc68b50194c 100644 (file)
@@ -1828,3 +1828,9 @@ const struct ieee80211_ops rtl_ops = {
        .flush = rtl_op_flush,
 };
 EXPORT_SYMBOL_GPL(rtl_ops);
+
+bool rtl_btc_status_false(void)
+{
+       return false;
+}
+EXPORT_SYMBOL_GPL(rtl_btc_status_false);
index 59cd3b9dca25f895bcd6ee24f43f366867c8e41a..624e1dc16d31d383ba92cbc8cd08e28d180df882 100644 (file)
@@ -42,5 +42,6 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
                     u32 mask, u32 data);
 void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
 bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
+bool rtl_btc_status_false(void);
 
 #endif
index 667aba81246c9ac094db11a5c455be9c29d273ad..25daa8715219c6b89f110bb048cace12e3414159 100644 (file)
@@ -1796,7 +1796,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
        rtl_pci_reset_trx_ring(hw);
 
        rtlpci->driver_is_goingto_unload = false;
-       if (rtlpriv->cfg->ops->get_btc_status()) {
+       if (rtlpriv->cfg->ops->get_btc_status &&
+           rtlpriv->cfg->ops->get_btc_status()) {
                rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
                rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
        }
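
The guard added above only calls get_btc_status() when the driver actually installed the callback, which is also what lets the rtl8192ce/de/se drivers point it at the new rtl_btc_status_false() stub. A minimal sketch of that optional-callback pattern, with made-up type and function names:

#include <stdbool.h>
#include <stdio.h>

struct hal_ops {
        bool (*get_btc_status)(void);
};

static bool btc_status_false(void) { return false; }

static void start(const struct hal_ops *ops)
{
        /* Dereference only when the callback exists and reports true. */
        if (ops->get_btc_status && ops->get_btc_status())
                printf("init BT-coexist state\n");
        else
                printf("BT-coexist not used\n");
}

int main(void)
{
        struct hal_ops with_cb = { .get_btc_status = btc_status_false };
        struct hal_ops without_cb = { 0 };

        start(&with_cb);
        start(&without_cb);
        return 0;
}
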
index a00861b26ece7ae6b2c6168ca22bdaed42c71033..29983bc96a89289187617a5cf202386f74386639 100644 (file)
@@ -656,7 +656,8 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 };
 
-void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
+        bool (*cmd_send_packet)(struct ieee80211_hw *, struct sk_buff *))
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -722,7 +723,10 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
        memcpy((u8 *)skb_put(skb, totalpacketlen),
               &reserved_page_packet, totalpacketlen);
 
-       rtstatus = rtl_cmd_send_packet(hw, skb);
+       if (cmd_send_packet)
+               rtstatus = cmd_send_packet(hw, skb);
+       else
+               rtstatus = rtl_cmd_send_packet(hw, skb);
 
        if (rtstatus)
                b_dlok = true;
index a815bd6273da0645cf86f6f77050f220875ec6ff..b64ae45dc6743869da3746a7382683936e86f88c 100644 (file)
@@ -109,7 +109,9 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
                         u32 cmd_len, u8 *p_cmdbuffer);
 void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
 void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
-void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl92c_set_fw_rsvdpagepkt
+       (struct ieee80211_hw *hw,
+        bool (*cmd_send_packet)(struct ieee80211_hw *, struct sk_buff *));
 void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
 void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len);
 void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
index 831df101d7b7a97866b55283c8e998e3c9152806..9b660df6fd712fd517638723bb0616b17b9df257 100644 (file)
        LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
 #define        GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr)       \
        LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
+#define GET_RX_STATUS_DESC_BUFF_ADDR(__pdesc)                  \
+       SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32)
 
 #define CHIP_VER_B                     BIT(4)
 #define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3)
index 8ec0f031f48a575035dbe59af4b2f236d4260c72..55357d69397a3370d8f4081ecec3d578daa3b8b9 100644 (file)
@@ -459,7 +459,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
                                               tmp_reg422 & (~BIT(6)));
 
-                               rtl92c_set_fw_rsvdpagepkt(hw, 0);
+                               rtl92c_set_fw_rsvdpagepkt(hw, NULL);
 
                                _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
                                _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
index d86b5b566444e1098a666909e3d86865c00f0b7c..46ea07605eb47f449f7850dea3b8b0862555ffde 100644 (file)
@@ -244,6 +244,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
        .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate,
        .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback,
        .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower,
+       .get_btc_status = rtl_btc_status_false,
 };
 
 static struct rtl_mod_params rtl92ce_mod_params = {
index 2fb9c7acb76a3bf7d6fa1feee728b15365b86207..dc3d20b17a265479906fa9d6777cc6e29e533f34 100644 (file)
@@ -728,6 +728,9 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
                case HW_DESC_RXPKT_LEN:
                        ret = GET_RX_DESC_PKT_LEN(pdesc);
                        break;
+               case HW_DESC_RXBUFF_ADDR:
+                       ret = GET_RX_STATUS_DESC_BUFF_ADDR(pdesc);
+                       break;
                default:
                        RT_ASSERT(false, "ERR rxdesc :%d not process\n",
                                  desc_name);
index 04aa0b5f5b3d10a1ae16eb8db5ea74d4f38e1890..873363acbacfac12df0d0c724dc962c74b2a2fc7 100644 (file)
@@ -1592,6 +1592,20 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
        }
 }
 
+bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       /* Currently nothing happens here.
+        * Traffic stops after some seconds in WPA2 802.11n mode.
+        * Maybe the rtl8192cu chip needs to be set up from here?
+        * If I understand correctly, the realtek vendor driver sends some
+        * urbs when it gets here.
+        *
+        * This is maybe necessary:
+        * rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb);
+        */
+       return true;
+}
+
 void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1939,7 +1953,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        recover = true;
                                rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
                                               tmp_reg422 & (~BIT(6)));
-                               rtl92c_set_fw_rsvdpagepkt(hw, 0);
+                               rtl92c_set_fw_rsvdpagepkt(hw,
+                                                         &usb_cmd_send_packet);
                                _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
                                _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
                                if (recover)
index 0f7812e0c8aa0c75d5382c52b4872431447c8108..c1e33b0228c0176e1ae31f5a8f99085a4f840397 100644 (file)
@@ -104,7 +104,6 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
 void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
 int rtl92c_download_fw(struct ieee80211_hw *hw);
 void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
-void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished);
 void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
 void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
                         u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
index 7c5fbaf5fee0d790d1cfbeaa563a1c360a1a976b..e06bafee37f9764b4041bd64665c46e3a889d238 100644 (file)
@@ -101,6 +101,12 @@ static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
        }
 }
 
+/* get bt coexist status */
+static bool rtl92cu_get_btc_status(void)
+{
+       return false;
+}
+
 static struct rtl_hal_ops rtl8192cu_hal_ops = {
        .init_sw_vars = rtl92cu_init_sw_vars,
        .deinit_sw_vars = rtl92cu_deinit_sw_vars,
@@ -148,6 +154,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
        .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
        .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
        .fill_h2c_cmd = rtl92c_fill_h2c_cmd,
+       .get_btc_status = rtl92cu_get_btc_status,
 };
 
 static struct rtl_mod_params rtl92cu_mod_params = {
index edab5a5351b52a814ec10833109666309279e2b1..a0aba088259aa97b6f80fb37e02d6f0c881b039b 100644 (file)
@@ -251,6 +251,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
        .get_rfreg = rtl92d_phy_query_rf_reg,
        .set_rfreg = rtl92d_phy_set_rf_reg,
        .linked_set_reg = rtl92d_linked_set_reg,
+       .get_btc_status = rtl_btc_status_false,
 };
 
 static struct rtl_mod_params rtl92de_mod_params = {
index dfdc9b20e4ad4d868012c80f4233559e58050052..1a87edca2c3f36a1d540eabc4c3247da5b1959d6 100644 (file)
@@ -362,7 +362,7 @@ void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                }
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
                         "switch case not process %x\n", variable);
                break;
        }
@@ -591,7 +591,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                acm_ctrl &= (~ACMHW_BEQEN);
                                break;
                        default:
-                               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
                                         "switch case not process\n");
                                break;
                        }
@@ -710,7 +710,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                }
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
                         "switch case not process %x\n", variable);
                break;
        }
@@ -2424,7 +2424,7 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
                        enc_algo = CAM_AES;
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
                                 "switch case not process\n");
                        enc_algo = CAM_TKIP;
                        break;
index 83c98674bfd3176f8fc512f80b7a31be83ba87da..6e7a70b43949eab504d70a70742dc216fb6e27d1 100644 (file)
 /* DWORD 6 */
 #define SET_RX_STATUS__DESC_BUFF_ADDR(__pdesc, __val)  \
        SET_BITS_OFFSET_LE(__pdesc + 24, 0, 32, __val)
+#define GET_RX_STATUS_DESC_BUFF_ADDR(__pdesc)                  \
+       SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32)
 
 #define SE_RX_HAL_IS_CCK_RATE(_pdesc)\
        (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE1M ||  \
index 1bff2a0f760087d0a510e247f01d1c6a33eee891..aadba29c167aff4c8ddf38db610db3cdfcbfcd93 100644 (file)
@@ -87,11 +87,8 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
 static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
 {
        struct ieee80211_hw *hw = context;
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
        struct rt_firmware *pfirmware = NULL;
-       int err;
 
        RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
                         "Firmware callback routine entered!\n");
@@ -112,20 +109,6 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
        memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size);
        pfirmware->sz_fw_tmpbufferlen = firmware->size;
        release_firmware(firmware);
-
-       err = ieee80211_register_hw(hw);
-       if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't register mac80211 hw\n");
-               return;
-       } else {
-               rtlpriv->mac80211.mac80211_registered = 1;
-       }
-       rtlpci->irq_alloc = 1;
-       set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
-
-       /*init rfkill */
-       rtl_init_rfkill(hw);
 }
 
 static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
@@ -226,8 +209,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
        if (!rtlpriv->rtlhal.pfirmware)
                return 1;
 
-       rtlpriv->max_fw_size = RTL8190_MAX_RAW_FIRMWARE_CODE_SIZE;
-
+       rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 +
+                              sizeof(struct fw_hdr);
        pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n"
                "Loading firmware %s\n", rtlpriv->cfg->fw_name);
        /* request fw */
@@ -294,6 +277,7 @@ static struct rtl_hal_ops rtl8192se_hal_ops = {
        .set_bbreg = rtl92s_phy_set_bb_reg,
        .get_rfreg = rtl92s_phy_query_rf_reg,
        .set_rfreg = rtl92s_phy_set_rf_reg,
+       .get_btc_status = rtl_btc_status_false,
 };
 
 static struct rtl_mod_params rtl92se_mod_params = {
index b358ebce8942127413105b39e3799006d559eed8..672fd3b02835e30414044311af02a3383886e554 100644 (file)
@@ -640,6 +640,9 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name)
                case HW_DESC_RXPKT_LEN:
                        ret = GET_RX_STATUS_DESC_PKT_LEN(desc);
                        break;
+               case HW_DESC_RXBUFF_ADDR:
+                       ret = GET_RX_STATUS_DESC_BUFF_ADDR(desc);
+                       break;
                default:
                        RT_ASSERT(false, "ERR rxdesc :%d not process\n",
                                  desc_name);
index 9786313dc62f2b3a50143bffc59255e8f1d45af8..1e9570fa874fe6309962d2de2251fc923da424cb 100644 (file)
@@ -1889,15 +1889,18 @@ static void _rtl8821ae_store_tx_power_by_rate(struct ieee80211_hw *hw,
        struct rtl_phy *rtlphy = &rtlpriv->phy;
        u8 rate_section = _rtl8821ae_get_rate_section_index(regaddr);
 
-       if (band != BAND_ON_2_4G && band != BAND_ON_5G)
+       if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid Band %d\n", band);
-
-       if (rfpath >= MAX_RF_PATH)
+               band = BAND_ON_2_4G;
+       }
+       if (rfpath >= MAX_RF_PATH) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid RfPath %d\n", rfpath);
-
-       if (txnum >= MAX_RF_PATH)
+               rfpath = MAX_RF_PATH - 1;
+       }
+       if (txnum >= MAX_RF_PATH) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid TxNum %d\n", txnum);
-
+               txnum = MAX_RF_PATH - 1;
+       }
        rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                 "TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n",
index 10cf69c4bc42ea93adefdab5569cbe90a19a4a63..46ee956d0235dc79dbb8130242b5b2586fd4db5e 100644 (file)
@@ -1117,7 +1117,18 @@ int rtl_usb_probe(struct usb_interface *intf,
        }
        rtlpriv->cfg->ops->init_sw_leds(hw);
 
+       err = ieee80211_register_hw(hw);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "Can't register mac80211 hw.\n");
+               err = -ENODEV;
+               goto error_out;
+       }
+       rtlpriv->mac80211.mac80211_registered = 1;
+
+       set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
        return 0;
+
 error_out:
        rtl_deinit_core(hw);
        _rtl_usb_io_handler_release(hw);
index d4eb8d2e9cb7f7f2bc0b3816dd788e6eb01c7927..083ecc93fe5e7b0941aedba3ccb9fd27158cc03b 100644 (file)
@@ -176,10 +176,11 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
        struct xen_netif_rx_back_ring rx;
        struct sk_buff_head rx_queue;
-       RING_IDX rx_last_skb_slots;
-       unsigned long status;
 
-       struct timer_list rx_stalled;
+       unsigned int rx_queue_max;
+       unsigned int rx_queue_len;
+       unsigned long last_rx_time;
+       bool stalled;
 
        struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
 
@@ -199,18 +200,14 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        struct xenvif_stats stats;
 };
 
+/* Maximum number of Rx slots a to-guest packet may use, including the
+ * slot needed for GSO meta-data.
+ */
+#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
+
 enum state_bit_shift {
        /* This bit marks that the vif is connected */
        VIF_STATUS_CONNECTED,
-       /* This bit signals the RX thread that queuing was stopped (in
-        * start_xmit), and either the timer fired or an RX interrupt came
-        */
-       QUEUE_STATUS_RX_PURGE_EVENT,
-       /* This bit tells the interrupt handler that this queue was the reason
-        * for the carrier off, so it should kick the thread. Only queues which
-        * brought it down can turn on the carrier.
-        */
-       QUEUE_STATUS_RX_STALLED
 };
 
 struct xenvif {
@@ -228,9 +225,6 @@ struct xenvif {
        u8 ip_csum:1;
        u8 ipv6_csum:1;
 
-       /* Internal feature information. */
-       u8 can_queue:1;     /* can queue packets for receiver? */
-
        /* Is this interface disabled? True when backend discovers
         * frontend is rogue.
         */
@@ -240,6 +234,9 @@ struct xenvif {
        /* Queues */
        struct xenvif_queue *queues;
        unsigned int num_queues; /* active queues, resource allocated */
+       unsigned int stalled_queues;
+
+       spinlock_t lock;
 
 #ifdef CONFIG_DEBUG_FS
        struct dentry *xenvif_dbg_root;
@@ -249,6 +246,14 @@ struct xenvif {
        struct net_device *dev;
 };
 
+struct xenvif_rx_cb {
+       unsigned long expires;
+       int meta_slots_used;
+       bool full_coalesce;
+};
+
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
 static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
 {
        return to_xenbus_device(vif->dev->dev.parent);
@@ -272,8 +277,6 @@ void xenvif_xenbus_fini(void);
 
 int xenvif_schedulable(struct xenvif *vif);
 
-int xenvif_must_stop_queue(struct xenvif_queue *queue);
-
 int xenvif_queue_stopped(struct xenvif_queue *queue);
 void xenvif_wake_queue(struct xenvif_queue *queue);
 
@@ -296,6 +299,8 @@ void xenvif_kick_thread(struct xenvif_queue *queue);
 
 int xenvif_dealloc_kthread(void *data);
 
+void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
+
 /* Determine whether the needed number of slots (req) are available,
  * and set req_event if not.
  */
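
The header changes above replace the old stall timer with byte accounting: each queue tracks rx_queue_len against rx_queue_max and stops or wakes the netdev transmit queue accordingly (the logic itself lands in netback.c further down). A toy single-threaded model of that accounting, with arbitrary sizes:

#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
        unsigned int rx_queue_len;      /* bytes currently queued */
        unsigned int rx_queue_max;      /* byte budget */
};

static bool enqueue(struct toy_queue *q, unsigned int skb_len)
{
        q->rx_queue_len += skb_len;
        return q->rx_queue_len > q->rx_queue_max;       /* true: stop queue */
}

static bool dequeue(struct toy_queue *q, unsigned int skb_len)
{
        q->rx_queue_len -= skb_len;
        return q->rx_queue_len < q->rx_queue_max;       /* true: safe to wake */
}

int main(void)
{
        struct toy_queue q = { .rx_queue_len = 0, .rx_queue_max = 4096 };

        printf("stop after big skb? %d\n", enqueue(&q, 6000));
        printf("wake after dequeue? %d\n", dequeue(&q, 6000));
        return 0;
}
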
index f379689dde309b7bf63eb511ef7f28a827f71fc5..895fe84011e7ea4a2138b33d9e84ab802f1142f6 100644 (file)
@@ -43,6 +43,9 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT  64
 
+/* Number of bytes allowed on the internal guest Rx queue. */
+#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
+
 /* This function is used to set SKBTX_DEV_ZEROCOPY as well as
  * increasing the inflight counter. We need to increase the inflight
  * counter because core driver calls into xenvif_zerocopy_callback
@@ -60,20 +63,11 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
        atomic_dec(&queue->inflight_packets);
 }
 
-static inline void xenvif_stop_queue(struct xenvif_queue *queue)
-{
-       struct net_device *dev = queue->vif->dev;
-
-       if (!queue->vif->can_queue)
-               return;
-
-       netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
-}
-
 int xenvif_schedulable(struct xenvif *vif)
 {
        return netif_running(vif->dev) &&
-               test_bit(VIF_STATUS_CONNECTED, &vif->status);
+               test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
+               !vif->disabled;
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -114,16 +108,7 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
        struct xenvif_queue *queue = dev_id;
-       struct netdev_queue *net_queue =
-               netdev_get_tx_queue(queue->vif->dev, queue->id);
 
-       /* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
-        * the carrier went down and this queue was previously blocked
-        */
-       if (unlikely(netif_tx_queue_stopped(net_queue) ||
-                    (!netif_carrier_ok(queue->vif->dev) &&
-                     test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
-               set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
        xenvif_kick_thread(queue);
 
        return IRQ_HANDLED;
@@ -151,24 +136,13 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
        netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
-/* Callback to wake the queue's thread and turn the carrier off on timeout */
-static void xenvif_rx_stalled(unsigned long data)
-{
-       struct xenvif_queue *queue = (struct xenvif_queue *)data;
-
-       if (xenvif_queue_stopped(queue)) {
-               set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
-               xenvif_kick_thread(queue);
-       }
-}
-
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        u16 index;
-       int min_slots_needed;
+       struct xenvif_rx_cb *cb;
 
        BUG_ON(skb->dev != dev);
 
@@ -191,30 +165,10 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
            !xenvif_schedulable(vif))
                goto drop;
 
-       /* At best we'll need one slot for the header and one for each
-        * frag.
-        */
-       min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
-
-       /* If the skb is GSO then we'll also need an extra slot for the
-        * metadata.
-        */
-       if (skb_is_gso(skb))
-               min_slots_needed++;
+       cb = XENVIF_RX_CB(skb);
+       cb->expires = jiffies + rx_drain_timeout_jiffies;
 
-       /* If the skb can't possibly fit in the remaining slots
-        * then turn off the queue to give the ring a chance to
-        * drain.
-        */
-       if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
-               queue->rx_stalled.function = xenvif_rx_stalled;
-               queue->rx_stalled.data = (unsigned long)queue;
-               xenvif_stop_queue(queue);
-               mod_timer(&queue->rx_stalled,
-                         jiffies + rx_drain_timeout_jiffies);
-       }
-
-       skb_queue_tail(&queue->rx_queue, skb);
+       xenvif_rx_queue_tail(queue, skb);
        xenvif_kick_thread(queue);
 
        return NETDEV_TX_OK;
@@ -465,6 +419,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        vif->queues = NULL;
        vif->num_queues = 0;
 
+       spin_lock_init(&vif->lock);
+
        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -508,6 +464,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
        init_timer(&queue->credit_timeout);
        queue->credit_window_start = get_jiffies_64();
 
+       queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
+
        skb_queue_head_init(&queue->rx_queue);
        skb_queue_head_init(&queue->tx_queue);
 
@@ -539,8 +497,6 @@ int xenvif_init_queue(struct xenvif_queue *queue)
                queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }
 
-       init_timer(&queue->rx_stalled);
-
        return 0;
 }
 
@@ -551,7 +507,6 @@ void xenvif_carrier_on(struct xenvif *vif)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        set_bit(VIF_STATUS_CONNECTED, &vif->status);
-       netif_carrier_on(vif->dev);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();
@@ -611,6 +566,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
                disable_irq(queue->rx_irq);
        }
 
+       queue->stalled = true;
+
        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)queue, "%s-guest-rx", queue->name);
        if (IS_ERR(task)) {
@@ -674,7 +631,6 @@ void xenvif_disconnect(struct xenvif *vif)
                netif_napi_del(&queue->napi);
 
                if (queue->task) {
-                       del_timer_sync(&queue->rx_stalled);
                        kthread_stop(queue->task);
                        queue->task = NULL;
                }
index 08f65996534cbd1b861c0031b70ebd6d78d47720..6563f0713fc0f5e324a18e2d87ba1f4614e1a008 100644 (file)
 bool separate_tx_rx_irq = 1;
 module_param(separate_tx_rx_irq, bool, 0644);
 
-/* When guest ring is filled up, qdisc queues the packets for us, but we have
- * to timeout them, otherwise other guests' packets can get stuck there
+/* The time that packets can stay on the guest Rx internal queue
+ * before they are dropped.
  */
 unsigned int rx_drain_timeout_msecs = 10000;
 module_param(rx_drain_timeout_msecs, uint, 0444);
 unsigned int rx_drain_timeout_jiffies;
 
+/* The length of time before the frontend is considered unresponsive
+ * because it isn't providing Rx slots.
+ */
+static unsigned int rx_stall_timeout_msecs = 60000;
+module_param(rx_stall_timeout_msecs, uint, 0444);
+static unsigned int rx_stall_timeout_jiffies;
+
 unsigned int xenvif_max_queues;
 module_param_named(max_queues, xenvif_max_queues, uint, 0644);
 MODULE_PARM_DESC(max_queues,
@@ -83,7 +90,6 @@ static void make_tx_response(struct xenvif_queue *queue,
                             s8       st);
 
 static inline int tx_work_todo(struct xenvif_queue *queue);
-static inline int rx_work_todo(struct xenvif_queue *queue);
 
 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
                                             u16      id,
@@ -163,6 +169,69 @@ bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
        return false;
 }
 
+void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&queue->rx_queue.lock, flags);
+
+       __skb_queue_tail(&queue->rx_queue, skb);
+
+       queue->rx_queue_len += skb->len;
+       if (queue->rx_queue_len > queue->rx_queue_max)
+               netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
+
+       spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+}
+
+static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
+{
+       struct sk_buff *skb;
+
+       spin_lock_irq(&queue->rx_queue.lock);
+
+       skb = __skb_dequeue(&queue->rx_queue);
+       if (skb)
+               queue->rx_queue_len -= skb->len;
+
+       spin_unlock_irq(&queue->rx_queue.lock);
+
+       return skb;
+}
+
+static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
+{
+       spin_lock_irq(&queue->rx_queue.lock);
+
+       if (queue->rx_queue_len < queue->rx_queue_max)
+               netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
+
+       spin_unlock_irq(&queue->rx_queue.lock);
+}
+
+
+static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
+{
+       struct sk_buff *skb;
+       while ((skb = xenvif_rx_dequeue(queue)) != NULL)
+               kfree_skb(skb);
+}
+
+static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
+{
+       struct sk_buff *skb;
+
+       for(;;) {
+               skb = skb_peek(&queue->rx_queue);
+               if (!skb)
+                       break;
+               if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
+                       break;
+               xenvif_rx_dequeue(queue);
+               kfree_skb(skb);
+       }
+}
+
 /*
  * Returns true if we should start a new receive buffer instead of
  * adding 'size' bytes to a buffer which currently contains 'offset'
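
xenvif_rx_queue_drop_expired() above walks the head of the internal queue and frees every packet whose deadline has passed. A toy illustration of that walk, with the queue faked as an array and an arbitrary clock standing in for jiffies:

#include <stdio.h>

#define NPKTS 3

int main(void)
{
        unsigned long now = 100;
        unsigned long expires[NPKTS] = { 90, 95, 150 }; /* head first */
        int head = 0;

        /* Drop from the head while the deadline has already passed. */
        while (head < NPKTS && expires[head] <= now) {
                printf("dropping packet %d (expired at %lu)\n",
                       head, expires[head]);
                head++;
        }
        printf("%d packet(s) left on the queue\n", NPKTS - head);
        return 0;
}
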
@@ -237,13 +306,6 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
        return meta;
 }
 
-struct xenvif_rx_cb {
-       int meta_slots_used;
-       bool full_coalesce;
-};
-
-#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
-
 /*
  * Set up the grant operations for this fragment. If it's a flipping
  * interface, we also set up the unmap request from here.
@@ -587,12 +649,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
        skb_queue_head_init(&rxq);
 
-       while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) {
+       while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
+              && (skb = xenvif_rx_dequeue(queue)) != NULL) {
                RING_IDX max_slots_needed;
                RING_IDX old_req_cons;
                RING_IDX ring_slots_used;
                int i;
 
+               queue->last_rx_time = jiffies;
+
                /* We need a cheap worse case estimate for the number of
                 * slots we'll use.
                 */
@@ -634,15 +699,6 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
                    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
                        max_slots_needed++;
 
-               /* If the skb may not fit then bail out now */
-               if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) {
-                       skb_queue_head(&queue->rx_queue, skb);
-                       need_to_notify = true;
-                       queue->rx_last_skb_slots = max_slots_needed;
-                       break;
-               } else
-                       queue->rx_last_skb_slots = 0;
-
                old_req_cons = queue->rx.req_cons;
                XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
                ring_slots_used = queue->rx.req_cons - old_req_cons;
@@ -1869,12 +1925,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
        }
 }
 
-static inline int rx_work_todo(struct xenvif_queue *queue)
-{
-       return (!skb_queue_empty(&queue->rx_queue) &&
-              xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots));
-}
-
 static inline int tx_work_todo(struct xenvif_queue *queue)
 {
        if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
@@ -1931,92 +1981,121 @@ err:
        return err;
 }
 
-static void xenvif_start_queue(struct xenvif_queue *queue)
+static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
 {
-       if (xenvif_schedulable(queue->vif))
-               xenvif_wake_queue(queue);
+       struct xenvif *vif = queue->vif;
+
+       queue->stalled = true;
+
+       /* At least one queue has stalled? Disable the carrier. */
+       spin_lock(&vif->lock);
+       if (vif->stalled_queues++ == 0) {
+               netdev_info(vif->dev, "Guest Rx stalled");
+               netif_carrier_off(vif->dev);
+       }
+       spin_unlock(&vif->lock);
 }
 
-/* Only called from the queue's thread, it handles the situation when the guest
- * doesn't post enough requests on the receiving ring.
- * First xenvif_start_xmit disables QDisc and start a timer, and then either the
- * timer fires, or the guest send an interrupt after posting new request. If it
- * is the timer, the carrier is turned off here.
- * */
-static void xenvif_rx_purge_event(struct xenvif_queue *queue)
+static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
 {
-       /* Either the last unsuccesful skb or at least 1 slot should fit */
-       int needed = queue->rx_last_skb_slots ?
-                    queue->rx_last_skb_slots : 1;
+       struct xenvif *vif = queue->vif;
 
-       /* It is assumed that if the guest post new slots after this, the RX
-        * interrupt will set the QUEUE_STATUS_RX_PURGE_EVENT bit and wake up
-        * the thread again
-        */
-       set_bit(QUEUE_STATUS_RX_STALLED, &queue->status);
-       if (!xenvif_rx_ring_slots_available(queue, needed)) {
-               rtnl_lock();
-               if (netif_carrier_ok(queue->vif->dev)) {
-                       /* Timer fired and there are still no slots. Turn off
-                        * everything except the interrupts
-                        */
-                       netif_carrier_off(queue->vif->dev);
-                       skb_queue_purge(&queue->rx_queue);
-                       queue->rx_last_skb_slots = 0;
-                       if (net_ratelimit())
-                               netdev_err(queue->vif->dev, "Carrier off due to lack of guest response on queue %d\n", queue->id);
-               } else {
-                       /* Probably an another queue already turned the carrier
-                        * off, make sure nothing is stucked in the internal
-                        * queue of this queue
-                        */
-                       skb_queue_purge(&queue->rx_queue);
-                       queue->rx_last_skb_slots = 0;
-               }
-               rtnl_unlock();
-       } else if (!netif_carrier_ok(queue->vif->dev)) {
-               unsigned int num_queues = queue->vif->num_queues;
-               unsigned int i;
-               /* The carrier was down, but an interrupt kicked
-                * the thread again after new requests were
-                * posted
-                */
-               clear_bit(QUEUE_STATUS_RX_STALLED,
-                         &queue->status);
-               rtnl_lock();
-               netif_carrier_on(queue->vif->dev);
-               netif_tx_wake_all_queues(queue->vif->dev);
-               rtnl_unlock();
+       queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
+       queue->stalled = false;
 
-               for (i = 0; i < num_queues; i++) {
-                       struct xenvif_queue *temp = &queue->vif->queues[i];
+       /* All queues are ready? Enable the carrier. */
+       spin_lock(&vif->lock);
+       if (--vif->stalled_queues == 0) {
+               netdev_info(vif->dev, "Guest Rx ready");
+               netif_carrier_on(vif->dev);
+       }
+       spin_unlock(&vif->lock);
+}
 
-                       xenvif_napi_schedule_or_enable_events(temp);
-               }
-               if (net_ratelimit())
-                       netdev_err(queue->vif->dev, "Carrier on again\n");
-       } else {
-               /* Queuing were stopped, but the guest posted
-                * new requests and sent an interrupt
-                */
-               clear_bit(QUEUE_STATUS_RX_STALLED,
-                         &queue->status);
-               del_timer_sync(&queue->rx_stalled);
-               xenvif_start_queue(queue);
+static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+{
+       RING_IDX prod, cons;
+
+       prod = queue->rx.sring->req_prod;
+       cons = queue->rx.req_cons;
+
+       return !queue->stalled
+               && prod - cons < XEN_NETBK_RX_SLOTS_MAX
+               && time_after(jiffies,
+                             queue->last_rx_time + rx_stall_timeout_jiffies);
+}
+
+static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
+{
+       RING_IDX prod, cons;
+
+       prod = queue->rx.sring->req_prod;
+       cons = queue->rx.req_cons;
+
+       return queue->stalled
+               && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
+}
+
+static bool xenvif_have_rx_work(struct xenvif_queue *queue)
+{
+       return (!skb_queue_empty(&queue->rx_queue)
+               && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
+               || xenvif_rx_queue_stalled(queue)
+               || xenvif_rx_queue_ready(queue)
+               || kthread_should_stop()
+               || queue->vif->disabled;
+}
+
+static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
+{
+       struct sk_buff *skb;
+       long timeout;
+
+       skb = skb_peek(&queue->rx_queue);
+       if (!skb)
+               return MAX_SCHEDULE_TIMEOUT;
+
+       timeout = XENVIF_RX_CB(skb)->expires - jiffies;
+       return timeout < 0 ? 0 : timeout;
+}
+
+/* Wait until the guest Rx thread has work.
+ *
+ * The timeout needs to be adjusted based on the current head of the
+ * queue (and not just the head at the beginning).  In particular, if
+ * the queue is initially empty an infinite timeout is used and this
+ * needs to be reduced when a skb is queued.
+ *
+ * This cannot be done with wait_event_timeout() because it only
+ * calculates the timeout once.
+ */
+static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
+{
+       DEFINE_WAIT(wait);
+
+       if (xenvif_have_rx_work(queue))
+               return;
+
+       for (;;) {
+               long ret;
+
+               prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
+               if (xenvif_have_rx_work(queue))
+                       break;
+               ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
+               if (!ret)
+                       break;
        }
+       finish_wait(&queue->wq, &wait);
 }
 
 int xenvif_kthread_guest_rx(void *data)
 {
        struct xenvif_queue *queue = data;
-       struct sk_buff *skb;
+       struct xenvif *vif = queue->vif;
 
-       while (!kthread_should_stop()) {
-               wait_event_interruptible(queue->wq,
-                                        rx_work_todo(queue) ||
-                                        queue->vif->disabled ||
-                                        test_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status) ||
-                                        kthread_should_stop());
+       for (;;) {
+               xenvif_wait_for_rx_work(queue);
 
                if (kthread_should_stop())
                        break;
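
xenvif_wait_for_rx_work() above recomputes the sleep length on every loop iteration because the deadline comes from whatever packet is currently at the head of the queue, which a single wait_event_timeout() call cannot do. A toy illustration of just the timeout calculation, with made-up times:

#include <stdio.h>

#define MAX_SCHEDULE_TIMEOUT 1000000L   /* stand-in for "wait forever" */

static long queue_timeout(const long *head_expires, long now)
{
        long timeout;

        if (!head_expires)
                return MAX_SCHEDULE_TIMEOUT;    /* empty queue */
        timeout = *head_expires - now;
        return timeout < 0 ? 0 : timeout;
}

int main(void)
{
        long now = 100;
        long expires = 160;

        printf("empty queue -> sleep %ld\n", queue_timeout(NULL, now));
        printf("head expires at %ld -> sleep %ld\n", expires,
               queue_timeout(&expires, now));
        return 0;
}
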
@@ -2028,35 +2107,38 @@ int xenvif_kthread_guest_rx(void *data)
                 * context so we defer it here, if this thread is
                 * associated with queue 0.
                 */
-               if (unlikely(queue->vif->disabled && queue->id == 0)) {
-                       xenvif_carrier_off(queue->vif);
-               } else if (unlikely(queue->vif->disabled)) {
-                       /* kthread_stop() would be called upon this thread soon,
-                        * be a bit proactive
-                        */
-                       skb_queue_purge(&queue->rx_queue);
-                       queue->rx_last_skb_slots = 0;
-               } else if (unlikely(test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT,
-                                                    &queue->status))) {
-                       xenvif_rx_purge_event(queue);
-               } else if (!netif_carrier_ok(queue->vif->dev)) {
-                       /* Another queue stalled and turned the carrier off, so
-                        * purge the internal queue of queues which were not
-                        * blocked
-                        */
-                       skb_queue_purge(&queue->rx_queue);
-                       queue->rx_last_skb_slots = 0;
+               if (unlikely(vif->disabled && queue->id == 0)) {
+                       xenvif_carrier_off(vif);
+                       xenvif_rx_queue_purge(queue);
+                       continue;
                }
 
                if (!skb_queue_empty(&queue->rx_queue))
                        xenvif_rx_action(queue);
 
+               /* If the guest hasn't provided any Rx slots for a
+                * while it's probably not responsive, drop the
+                * carrier so packets are dropped earlier.
+                */
+               if (xenvif_rx_queue_stalled(queue))
+                       xenvif_queue_carrier_off(queue);
+               else if (xenvif_rx_queue_ready(queue))
+                       xenvif_queue_carrier_on(queue);
+
+               /* Queued packets may have foreign pages from other
+                * domains.  These cannot be queued indefinitely as
+                * this would starve guests of grant refs and transmit
+                * slots.
+                */
+               xenvif_rx_queue_drop_expired(queue);
+
+               xenvif_rx_queue_maybe_wake(queue);
+
                cond_resched();
        }
 
        /* Bin any remaining skbs */
-       while ((skb = skb_dequeue(&queue->rx_queue)) != NULL)
-               dev_kfree_skb(skb);
+       xenvif_rx_queue_purge(queue);
 
        return 0;
 }
@@ -2113,6 +2195,7 @@ static int __init netback_init(void)
                goto failed_init;
 
        rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
+       rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);
 
 #ifdef CONFIG_DEBUG_FS
        xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
index 8079c31ac5e64372fe978405c8faa72981998320..4e56a27f9689a925ff3cf26118c6ee505eb963a4 100644 (file)
@@ -52,6 +52,7 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v)
        struct xenvif_queue *queue = m->private;
        struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
        struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
+       struct netdev_queue *dev_queue;
 
        if (tx_ring->sring) {
                struct xen_netif_tx_sring *sring = tx_ring->sring;
@@ -112,6 +113,13 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v)
                   queue->credit_timeout.expires,
                   jiffies);
 
+       dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);
+
+       seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
+                  queue->rx_queue_len, queue->rx_queue_max,
+                  skb_queue_len(&queue->rx_queue),
+                  netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");
+
        return 0;
 }
 
@@ -703,6 +711,7 @@ static void connect(struct backend_info *be)
        be->vif->queues = vzalloc(requested_num_queues *
                                  sizeof(struct xenvif_queue));
        be->vif->num_queues = requested_num_queues;
+       be->vif->stalled_queues = requested_num_queues;
 
        for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
                queue = &be->vif->queues[queue_index];
@@ -873,15 +882,10 @@ static int read_xenbus_vif_flags(struct backend_info *be)
        if (!rx_copy)
                return -EOPNOTSUPP;
 
-       if (vif->dev->tx_queue_len != 0) {
-               if (xenbus_scanf(XBT_NIL, dev->otherend,
-                                "feature-rx-notify", "%d", &val) < 0)
-                       val = 0;
-               if (val)
-                       vif->can_queue = 1;
-               else
-                       /* Must be non-zero for pfifo_fast to work. */
-                       vif->dev->tx_queue_len = 1;
+       if (xenbus_scanf(XBT_NIL, dev->otherend,
+                        "feature-rx-notify", "%d", &val) < 0 || val == 0) {
+               xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory");
+               return -EINVAL;
        }
 
        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
index 59fb12e84e6bb8da22f62c6de25817cf3d743682..dc566b38645f6fcd3a296aa5e82419c857b81bcc 100644 (file)
@@ -243,23 +243,27 @@ static inline struct reserved_mem *__find_rmem(struct device_node *node)
  * This function assigns the memory region pointed to by the "memory-region" device tree
  * property to the given device.
  */
-void of_reserved_mem_device_init(struct device *dev)
+int of_reserved_mem_device_init(struct device *dev)
 {
        struct reserved_mem *rmem;
        struct device_node *np;
+       int ret;
 
        np = of_parse_phandle(dev->of_node, "memory-region", 0);
        if (!np)
-               return;
+               return -ENODEV;
 
        rmem = __find_rmem(np);
        of_node_put(np);
 
        if (!rmem || !rmem->ops || !rmem->ops->device_init)
-               return;
+               return -EINVAL;
+
+       ret = rmem->ops->device_init(rmem, dev);
+       if (ret == 0)
+               dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
 
-       rmem->ops->device_init(rmem, dev);
-       dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
+       return ret;
 }
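Since of_reserved_mem_device_init() now returns an error code, callers can act on it. A minimal sketch of a hypothetical driver probe (names and error policy are illustrative, not from this patch):

	#include <linux/of_reserved_mem.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		/* -ENODEV only means no "memory-region" property was given */
		ret = of_reserved_mem_device_init(&pdev->dev);
		if (ret && ret != -ENODEV)
			return ret;

		/* ... remainder of the probe ... */
		return 0;
	}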
 
 /**
index 233fe8a882649dfc4197b8ac58ff1dd0bdaed6e9..69202d1eb8fbc1d0e91eccb64e2acedd03f16d62 100644 (file)
@@ -275,15 +275,22 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
                goto err_pcie;
        }
 
-       /* allow the clocks to stabilize */
-       usleep_range(200, 500);
-
        /* power up core phy and enable ref clock */
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+       /*
+        * The async reset input needs the ref clock to synchronize
+        * internally.  If the ref clock only comes up after reset is
+        * released, the internally synced reset time is too short to
+        * meet the requirement, so add a ~10us delay here.
+        */
+       udelay(10);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
 
+       /* allow the clocks to stabilize */
+       usleep_range(200, 500);
+
        /* Some boards don't have PCIe reset GPIO. */
        if (gpio_is_valid(imx6_pcie->reset_gpio)) {
                gpio_set_value(imx6_pcie->reset_gpio, 0);
index 3a5e7e28b8740307efd8eb2aa5e69c9717cd0e81..07aa722bb12cd61a6a3a8767b2efe1dd826e6952 100644 (file)
@@ -262,13 +262,6 @@ static int pciehp_probe(struct pcie_device *dev)
                goto err_out_none;
        }
 
-       if (!dev->port->subordinate) {
-               /* Can happen if we run out of bus numbers during probe */
-               dev_err(&dev->device,
-                       "Hotplug bridge without secondary bus, ignoring\n");
-               goto err_out_none;
-       }
-
        ctrl = pcie_init(dev);
        if (!ctrl) {
                dev_err(&dev->device, "Controller initialization failed\n");
index 92b6d9ab00e498ac02fdd08ead2d4f3bfc76d207..2c6643fdc0cf27bcb9b726bb6c9febf3ca137dc7 100644 (file)
@@ -185,7 +185,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(modalias);
 
-static ssize_t enabled_store(struct device *dev, struct device_attribute *attr,
+static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -210,7 +210,7 @@ static ssize_t enabled_store(struct device *dev, struct device_attribute *attr,
        return result < 0 ? result : count;
 }
 
-static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
+static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
 {
        struct pci_dev *pdev;
@@ -218,7 +218,7 @@ static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
        pdev = to_pci_dev(dev);
        return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
 }
-static DEVICE_ATTR_RW(enabled);
+static DEVICE_ATTR_RW(enable);
 
 #ifdef CONFIG_NUMA
 static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
@@ -563,7 +563,7 @@ static struct attribute *pci_dev_attrs[] = {
 #endif
        &dev_attr_dma_mask_bits.attr,
        &dev_attr_consistent_dma_mask_bits.attr,
-       &dev_attr_enabled.attr,
+       &dev_attr_enable.attr,
        &dev_attr_broken_parity_status.attr,
        &dev_attr_msi_bus.attr,
 #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
index e305416d7697d2fd114ab86be2d7cd914484b649..196a5c8838c4cae8f63fb4f894dfaae9b00836d3 100644 (file)
@@ -44,7 +44,7 @@ static const int rk808_buck_config_regs[] = {
 };
 
 static const struct regulator_linear_range rk808_buck_voltage_ranges[] = {
-       REGULATOR_LINEAR_RANGE(700000, 0, 63, 12500),
+       REGULATOR_LINEAR_RANGE(712500, 0, 63, 12500),
 };
 
 static const struct regulator_linear_range rk808_buck4_voltage_ranges[] = {
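Assuming the usual REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) semantics, where selector sel maps to min_uV + (sel - min_sel) * step_uV, the corrected 712500 uV base gives:

	selector  0 -> 712500 uV                        = 0.7125 V
	selector 63 -> 712500 + 63 * 12500 = 1500000 uV = 1.5 V

so with the old 700000 uV base every value in this range was reported 12.5 mV too low.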
index 94ae1798d48a96876686e37ad10866d272cccfa2..6dd12ddbabc634b5b22fa8d4dac358c82557a4fd 100644 (file)
@@ -1320,7 +1320,7 @@ config RTC_DRV_LPC32XX
 
 config RTC_DRV_PM8XXX
        tristate "Qualcomm PMIC8XXX RTC"
-       depends on MFD_PM8XXX
+       depends on MFD_PM8XXX || MFD_SPMI_PMIC
        help
          If you say yes here you get support for the
          Qualcomm PMIC8XXX RTC.
index 314129e66d6e8fd47280202cacf19f0793848e58..92679df6d6e222dd12e4f2471f9e1b3109048c50 100644 (file)
@@ -160,7 +160,7 @@ static int trickle_charger_of_init(struct device *dev, struct device_node *node)
                        dev_err(dev, "bq32k: diode and resistor mismatch\n");
                        return -EINVAL;
                }
-               reg = 0x25;
+               reg = 0x45;
                break;
 
        default:
index 197699f358c7c18456d9ff7ab8130700001507a7..5adcf111fc1409ac12cd35869cad76cf58c93d1d 100644 (file)
 
 /* RTC_CTRL register bit fields */
 #define PM8xxx_RTC_ENABLE              BIT(7)
-#define PM8xxx_RTC_ALARM_ENABLE                BIT(1)
 #define PM8xxx_RTC_ALARM_CLEAR         BIT(0)
 
 #define NUM_8_BIT_RTC_REGS             0x4
 
+/**
+ * struct pm8xxx_rtc_regs - describe RTC registers per PMIC versions
+ * @ctrl: base address of control register
+ * @write: base address of write register
+ * @read: base address of read register
+ * @alarm_ctrl: base address of alarm control register
+ * @alarm_ctrl2: base address of alarm control2 register
+ * @alarm_rw: base address of alarm read-write register
+ * @alarm_en: alarm enable mask
+ */
+struct pm8xxx_rtc_regs {
+       unsigned int ctrl;
+       unsigned int write;
+       unsigned int read;
+       unsigned int alarm_ctrl;
+       unsigned int alarm_ctrl2;
+       unsigned int alarm_rw;
+       unsigned int alarm_en;
+};
+
 /**
  * struct pm8xxx_rtc -  rtc driver internal structure
  * @rtc:               rtc device for this driver.
  * @regmap:            regmap used to access RTC registers
  * @allow_set_time:    indicates whether writing to the RTC is allowed
  * @rtc_alarm_irq:     rtc alarm irq number.
- * @rtc_base:          address of rtc control register.
- * @rtc_read_base:     base address of read registers.
- * @rtc_write_base:    base address of write registers.
- * @alarm_rw_base:     base address of alarm registers.
  * @ctrl_reg:          rtc control register.
  * @rtc_dev:           device structure.
  * @ctrl_reg_lock:     spinlock protecting access to ctrl_reg.
@@ -51,11 +66,7 @@ struct pm8xxx_rtc {
        struct regmap *regmap;
        bool allow_set_time;
        int rtc_alarm_irq;
-       int rtc_base;
-       int rtc_read_base;
-       int rtc_write_base;
-       int alarm_rw_base;
-       u8 ctrl_reg;
+       const struct pm8xxx_rtc_regs *regs;
        struct device *rtc_dev;
        spinlock_t ctrl_reg_lock;
 };
@@ -71,8 +82,10 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
        int rc, i;
        unsigned long secs, irq_flags;
-       u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0, ctrl_reg;
+       u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0;
+       unsigned int ctrl_reg;
        struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+       const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
        if (!rtc_dd->allow_set_time)
                return -EACCES;
@@ -87,30 +100,30 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
        dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
 
        spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
-       ctrl_reg = rtc_dd->ctrl_reg;
 
-       if (ctrl_reg & PM8xxx_RTC_ALARM_ENABLE) {
+       rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg);
+       if (rc)
+               goto rtc_rw_fail;
+
+       if (ctrl_reg & regs->alarm_en) {
                alarm_enabled = 1;
-               ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
-               rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+               ctrl_reg &= ~regs->alarm_en;
+               rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
                if (rc) {
                        dev_err(dev, "Write to RTC control register failed\n");
                        goto rtc_rw_fail;
                }
-               rtc_dd->ctrl_reg = ctrl_reg;
-       } else {
-               spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
        }
 
        /* Write 0 to Byte[0] */
-       rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, 0);
+       rc = regmap_write(rtc_dd->regmap, regs->write, 0);
        if (rc) {
                dev_err(dev, "Write to RTC write data register failed\n");
                goto rtc_rw_fail;
        }
 
        /* Write Byte[1], Byte[2], Byte[3] */
-       rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->rtc_write_base + 1,
+       rc = regmap_bulk_write(rtc_dd->regmap, regs->write + 1,
                               &value[1], sizeof(value) - 1);
        if (rc) {
                dev_err(dev, "Write to RTC write data register failed\n");
@@ -118,25 +131,23 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
        }
 
        /* Write Byte[0] */
-       rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, value[0]);
+       rc = regmap_write(rtc_dd->regmap, regs->write, value[0]);
        if (rc) {
                dev_err(dev, "Write to RTC write data register failed\n");
                goto rtc_rw_fail;
        }
 
        if (alarm_enabled) {
-               ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
-               rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+               ctrl_reg |= regs->alarm_en;
+               rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
                if (rc) {
                        dev_err(dev, "Write to RTC control register failed\n");
                        goto rtc_rw_fail;
                }
-               rtc_dd->ctrl_reg = ctrl_reg;
        }
 
 rtc_rw_fail:
-       if (alarm_enabled)
-               spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+       spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
 
        return rc;
 }
@@ -148,9 +159,9 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
        unsigned long secs;
        unsigned int reg;
        struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+       const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
-       rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base,
-                             value, sizeof(value));
+       rc = regmap_bulk_read(rtc_dd->regmap, regs->read, value, sizeof(value));
        if (rc) {
                dev_err(dev, "RTC read data register failed\n");
                return rc;
@@ -160,14 +171,14 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
         * Read the LSB again and check if there has been a carry over.
         * If there is, redo the read operation.
         */
-       rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_read_base, &reg);
+       rc = regmap_read(rtc_dd->regmap, regs->read, &reg);
        if (rc < 0) {
                dev_err(dev, "RTC read data register failed\n");
                return rc;
        }
 
        if (unlikely(reg < value[0])) {
-               rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base,
+               rc = regmap_bulk_read(rtc_dd->regmap, regs->read,
                                      value, sizeof(value));
                if (rc) {
                        dev_err(dev, "RTC read data register failed\n");
@@ -195,9 +206,11 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
 static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
        int rc, i;
-       u8 value[NUM_8_BIT_RTC_REGS], ctrl_reg;
+       u8 value[NUM_8_BIT_RTC_REGS];
+       unsigned int ctrl_reg;
        unsigned long secs, irq_flags;
        struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+       const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
        rtc_tm_to_time(&alarm->time, &secs);
 
@@ -208,28 +221,28 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 
        spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
 
-       rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->alarm_rw_base, value,
+       rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value,
                               sizeof(value));
        if (rc) {
                dev_err(dev, "Write to RTC ALARM register failed\n");
                goto rtc_rw_fail;
        }
 
-       ctrl_reg = rtc_dd->ctrl_reg;
+       rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
+       if (rc)
+               goto rtc_rw_fail;
 
        if (alarm->enabled)
-               ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
+               ctrl_reg |= regs->alarm_en;
        else
-               ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
+               ctrl_reg &= ~regs->alarm_en;
 
-       rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+       rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
        if (rc) {
-               dev_err(dev, "Write to RTC control register failed\n");
+               dev_err(dev, "Write to RTC alarm control register failed\n");
                goto rtc_rw_fail;
        }
 
-       rtc_dd->ctrl_reg = ctrl_reg;
-
        dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
                alarm->time.tm_hour, alarm->time.tm_min,
                alarm->time.tm_sec, alarm->time.tm_mday,
@@ -245,8 +258,9 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
        u8 value[NUM_8_BIT_RTC_REGS];
        unsigned long secs;
        struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+       const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
-       rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->alarm_rw_base, value,
+       rc = regmap_bulk_read(rtc_dd->regmap, regs->alarm_rw, value,
                              sizeof(value));
        if (rc) {
                dev_err(dev, "RTC alarm time read failed\n");
@@ -276,25 +290,26 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
        int rc;
        unsigned long irq_flags;
        struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
-       u8 ctrl_reg;
+       const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
+       unsigned int ctrl_reg;
 
        spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
 
-       ctrl_reg = rtc_dd->ctrl_reg;
+       rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
+       if (rc)
+               goto rtc_rw_fail;
 
        if (enable)
-               ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
+               ctrl_reg |= regs->alarm_en;
        else
-               ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
+               ctrl_reg &= ~regs->alarm_en;
 
-       rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+       rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
        if (rc) {
                dev_err(dev, "Write to RTC control register failed\n");
                goto rtc_rw_fail;
        }
 
-       rtc_dd->ctrl_reg = ctrl_reg;
-
 rtc_rw_fail:
        spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
        return rc;
@@ -311,6 +326,7 @@ static const struct rtc_class_ops pm8xxx_rtc_ops = {
 static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
 {
        struct pm8xxx_rtc *rtc_dd = dev_id;
+       const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
        unsigned int ctrl_reg;
        int rc;
        unsigned long irq_flags;
@@ -320,48 +336,100 @@ static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
        spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
 
        /* Clear the alarm enable bit */
-       ctrl_reg = rtc_dd->ctrl_reg;
-       ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
+       rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
+       if (rc) {
+               spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+               goto rtc_alarm_handled;
+       }
+
+       ctrl_reg &= ~regs->alarm_en;
 
-       rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+       rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
        if (rc) {
                spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
                dev_err(rtc_dd->rtc_dev,
-                       "Write to RTC control register failed\n");
+                       "Write to alarm control register failed\n");
                goto rtc_alarm_handled;
        }
 
-       rtc_dd->ctrl_reg = ctrl_reg;
        spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
 
        /* Clear RTC alarm register */
-       rc = regmap_read(rtc_dd->regmap,
-                        rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET,
-                        &ctrl_reg);
+       rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl2, &ctrl_reg);
        if (rc) {
                dev_err(rtc_dd->rtc_dev,
-                       "RTC Alarm control register read failed\n");
+                       "RTC Alarm control2 register read failed\n");
                goto rtc_alarm_handled;
        }
 
-       ctrl_reg &= ~PM8xxx_RTC_ALARM_CLEAR;
-       rc = regmap_write(rtc_dd->regmap,
-                         rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET,
-                         ctrl_reg);
+       ctrl_reg |= PM8xxx_RTC_ALARM_CLEAR;
+       rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl2, ctrl_reg);
        if (rc)
                dev_err(rtc_dd->rtc_dev,
-                       "Write to RTC Alarm control register failed\n");
+                       "Write to RTC Alarm control2 register failed\n");
 
 rtc_alarm_handled:
        return IRQ_HANDLED;
 }
 
+static int pm8xxx_rtc_enable(struct pm8xxx_rtc *rtc_dd)
+{
+       const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
+       unsigned int ctrl_reg;
+       int rc;
+
+       /* Check if the RTC is on, else turn it on */
+       rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg);
+       if (rc)
+               return rc;
+
+       if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
+               ctrl_reg |= PM8xxx_RTC_ENABLE;
+               rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+
+static const struct pm8xxx_rtc_regs pm8921_regs = {
+       .ctrl           = 0x11d,
+       .write          = 0x11f,
+       .read           = 0x123,
+       .alarm_rw       = 0x127,
+       .alarm_ctrl     = 0x11d,
+       .alarm_ctrl2    = 0x11e,
+       .alarm_en       = BIT(1),
+};
+
+static const struct pm8xxx_rtc_regs pm8058_regs = {
+       .ctrl           = 0x1e8,
+       .write          = 0x1ea,
+       .read           = 0x1ee,
+       .alarm_rw       = 0x1f2,
+       .alarm_ctrl     = 0x1e8,
+       .alarm_ctrl2    = 0x1e9,
+       .alarm_en       = BIT(1),
+};
+
+static const struct pm8xxx_rtc_regs pm8941_regs = {
+       .ctrl           = 0x6046,
+       .write          = 0x6040,
+       .read           = 0x6048,
+       .alarm_rw       = 0x6140,
+       .alarm_ctrl     = 0x6146,
+       .alarm_ctrl2    = 0x6148,
+       .alarm_en       = BIT(7),
+};
+
 /*
  * Hardcoded RTC bases until IORESOURCE_REG mapping is figured out
  */
 static const struct of_device_id pm8xxx_id_table[] = {
-       { .compatible = "qcom,pm8921-rtc", .data = (void *) 0x11D },
-       { .compatible = "qcom,pm8058-rtc", .data = (void *) 0x1E8 },
+       { .compatible = "qcom,pm8921-rtc", .data = &pm8921_regs },
+       { .compatible = "qcom,pm8058-rtc", .data = &pm8058_regs },
+       { .compatible = "qcom,pm8941-rtc", .data = &pm8941_regs },
        { },
 };
 MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
@@ -369,7 +437,6 @@ MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
 static int pm8xxx_rtc_probe(struct platform_device *pdev)
 {
        int rc;
-       unsigned int ctrl_reg;
        struct pm8xxx_rtc *rtc_dd;
        const struct of_device_id *match;
 
@@ -399,33 +466,12 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
        rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node,
                                                      "allow-set-time");
 
-       rtc_dd->rtc_base = (long) match->data;
-
-       /* Setup RTC register addresses */
-       rtc_dd->rtc_write_base = rtc_dd->rtc_base + PM8XXX_RTC_WRITE_OFFSET;
-       rtc_dd->rtc_read_base = rtc_dd->rtc_base + PM8XXX_RTC_READ_OFFSET;
-       rtc_dd->alarm_rw_base = rtc_dd->rtc_base + PM8XXX_ALARM_RW_OFFSET;
-
+       rtc_dd->regs = match->data;
        rtc_dd->rtc_dev = &pdev->dev;
 
-       /* Check if the RTC is on, else turn it on */
-       rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_base, &ctrl_reg);
-       if (rc) {
-               dev_err(&pdev->dev, "RTC control register read failed!\n");
+       rc = pm8xxx_rtc_enable(rtc_dd);
+       if (rc)
                return rc;
-       }
-
-       if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
-               ctrl_reg |= PM8xxx_RTC_ENABLE;
-               rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
-               if (rc) {
-                       dev_err(&pdev->dev,
-                               "Write to RTC control register failed\n");
-                       return rc;
-               }
-       }
-
-       rtc_dd->ctrl_reg = ctrl_reg;
 
        platform_set_drvdata(pdev, rtc_dd);
 
index a6b1252c9941884a93dac49ed4c10c828ad0964d..806072238c00c4920e2eb344a651b8e604785b9f 100644 (file)
@@ -535,13 +535,15 @@ static int s3c_rtc_probe(struct platform_device *pdev)
        }
        clk_prepare_enable(info->rtc_clk);
 
-       info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src");
-       if (IS_ERR(info->rtc_src_clk)) {
-               dev_err(&pdev->dev, "failed to find rtc source clock\n");
-               return PTR_ERR(info->rtc_src_clk);
+       if (info->data->needs_src_clk) {
+               info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src");
+               if (IS_ERR(info->rtc_src_clk)) {
+                       dev_err(&pdev->dev,
+                               "failed to find rtc source clock\n");
+                       return PTR_ERR(info->rtc_src_clk);
+               }
+               clk_prepare_enable(info->rtc_src_clk);
        }
-       clk_prepare_enable(info->rtc_src_clk);
-
 
        /* check to see if everything is setup correctly */
        if (info->data->enable)
index 72921588525008a0f4eaf615980eb884b8614108..72e12bad14b9c478a8025db3ef7d31601c083aa4 100644 (file)
@@ -669,6 +669,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
        master->cleanup = dw_spi_cleanup;
        master->transfer_one_message = dw_spi_transfer_one_message;
        master->max_speed_hz = dws->max_freq;
+       master->dev.of_node = dev->of_node;
 
        /* Basic HW init */
        spi_hw_init(dws);
index 835cdda6f4f586d1eb31fccf6904f4203a82ddcd..c76b7d7879dfe6539f80a174ceec9538db4f032c 100644 (file)
@@ -454,7 +454,7 @@ static int orion_spi_probe(struct platform_device *pdev)
        spi->master = master;
 
        of_id = of_match_device(orion_spi_of_match_table, &pdev->dev);
-       devdata = of_id->data;
+       devdata = (of_id) ? of_id->data : &orion_spi_dev_data;
        spi->devdata = devdata;
 
        spi->clk = devm_clk_get(&pdev->dev, NULL);
index f35f723816eacb94a0921247dfbe5dbe74611e23..fc2dd8441608fe31ad482800312fe26722c384e1 100644 (file)
@@ -1106,7 +1106,7 @@ err_rxdesc:
                     pl022->sgt_tx.nents, DMA_TO_DEVICE);
 err_tx_sgmap:
        dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
-                    pl022->sgt_tx.nents, DMA_FROM_DEVICE);
+                    pl022->sgt_rx.nents, DMA_FROM_DEVICE);
 err_rx_sgmap:
        sg_free_table(&pl022->sgt_tx);
 err_alloc_tx_sg:
index f96ea8a38d640f988a797ecd448b2c9c64e9a8ed..87bc16f491f0ce8e3d78733d434a67189632e756 100644 (file)
 #define RXBUSY                                         (1 << 0)
 #define TXBUSY                                         (1 << 1)
 
+/* sclk_out: the rk3x SPI master's internal logic supports up to 50MHz */
+#define MAX_SCLK_OUT           50000000
+
 enum rockchip_ssi_type {
        SSI_MOTO_SPI = 0,
        SSI_TI_SSP,
@@ -325,6 +328,8 @@ static int rockchip_spi_unprepare_message(struct spi_master *master,
 
        spin_unlock_irqrestore(&rs->lock, flags);
 
+       spi_enable_chip(rs, 0);
+
        return 0;
 }
 
@@ -381,6 +386,8 @@ static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
        if (rs->tx)
                wait_for_idle(rs);
 
+       spi_enable_chip(rs, 0);
+
        return 0;
 }
 
@@ -392,8 +399,10 @@ static void rockchip_spi_dma_rxcb(void *data)
        spin_lock_irqsave(&rs->lock, flags);
 
        rs->state &= ~RXBUSY;
-       if (!(rs->state & TXBUSY))
+       if (!(rs->state & TXBUSY)) {
+               spi_enable_chip(rs, 0);
                spi_finalize_current_transfer(rs->master);
+       }
 
        spin_unlock_irqrestore(&rs->lock, flags);
 }
@@ -409,8 +418,10 @@ static void rockchip_spi_dma_txcb(void *data)
        spin_lock_irqsave(&rs->lock, flags);
 
        rs->state &= ~TXBUSY;
-       if (!(rs->state & RXBUSY))
+       if (!(rs->state & RXBUSY)) {
+               spi_enable_chip(rs, 0);
                spi_finalize_current_transfer(rs->master);
+       }
 
        spin_unlock_irqrestore(&rs->lock, flags);
 }
@@ -496,12 +507,19 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
                        dmacr |= RF_DMA_EN;
        }
 
+       if (WARN_ON(rs->speed > MAX_SCLK_OUT))
+               rs->speed = MAX_SCLK_OUT;
+
+       /* the minimum divisor is 2 */
+       if (rs->max_freq < 2 * rs->speed) {
+               clk_set_rate(rs->spiclk, 2 * rs->speed);
+               rs->max_freq = clk_get_rate(rs->spiclk);
+       }
+
        /* div does not support odd numbers */
        div = max_t(u32, rs->max_freq / rs->speed, 1);
        div = (div + 1) & 0xfffe;
 
-       spi_enable_chip(rs, 0);
-
        writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
 
        writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
@@ -515,8 +533,6 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
        spi_set_clk(rs, div);
 
        dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
-
-       spi_enable_chip(rs, 1);
 }
 
 static int rockchip_spi_transfer_one(
@@ -524,7 +540,7 @@ static int rockchip_spi_transfer_one(
                struct spi_device *spi,
                struct spi_transfer *xfer)
 {
-       int ret = 0;
+       int ret = 1;
        struct rockchip_spi *rs = spi_master_get_devdata(master);
 
        WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
@@ -556,17 +572,27 @@ static int rockchip_spi_transfer_one(
                rs->tmode = CR0_XFM_RO;
 
        /* we need to prepare DMA before the SPI is enabled */
-       if (master->can_dma && master->can_dma(master, spi, xfer)) {
+       if (master->can_dma && master->can_dma(master, spi, xfer))
                rs->use_dma = 1;
-               rockchip_spi_prepare_dma(rs);
-       } else {
+       else
                rs->use_dma = 0;
-       }
 
        rockchip_spi_config(rs);
 
-       if (!rs->use_dma)
+       if (rs->use_dma) {
+               if (rs->tmode == CR0_XFM_RO) {
+                       /* rx: dma must be prepared first */
+                       rockchip_spi_prepare_dma(rs);
+                       spi_enable_chip(rs, 1);
+               } else {
+                       /* tx or tr: spi must be enabled first */
+                       spi_enable_chip(rs, 1);
+                       rockchip_spi_prepare_dma(rs);
+               }
+       } else {
+               spi_enable_chip(rs, 1);
                ret = rockchip_spi_pio_transfer(rs);
+       }
 
        return ret;
 }
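To make the clock setup in rockchip_spi_config() above concrete, a worked example with assumed rates (not taken from the patch, and assuming clk_set_rate() can deliver the requested parent rate):

	speed = 20 MHz, max_freq = 100 MHz
	  -> max_freq >= 2 * speed, no clk_set_rate() needed
	  -> div = max(100000000 / 20000000, 1) = 5; (5 + 1) & 0xfffe = 6
	  -> sclk_out = 100 MHz / 6 ~ 16.7 MHz (under the 50 MHz cap)

	speed = 40 MHz, max_freq = 50 MHz
	  -> max_freq < 2 * speed, so clk_set_rate(rs->spiclk, 80 MHz) and
	     max_freq is re-read as 80 MHz
	  -> div = max(80000000 / 40000000, 1) = 2; (2 + 1) & 0xfffe = 2
	  -> sclk_out = 80 MHz / 2 = 40 MHz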
index e3bc23bb588340f6856efe33e2d26fc4f6156ea4..e50039fb14749f0f700505c3be183f1ad52e693a 100644 (file)
@@ -82,10 +82,11 @@ struct spidev_data {
        struct spi_device       *spi;
        struct list_head        device_entry;
 
-       /* buffer is NULL unless this device is open (users > 0) */
+       /* TX/RX buffers are NULL unless this device is open (users > 0) */
        struct mutex            buf_lock;
        unsigned                users;
-       u8                      *buffer;
+       u8                      *tx_buffer;
+       u8                      *rx_buffer;
 };
 
 static LIST_HEAD(device_list);
@@ -135,7 +136,7 @@ static inline ssize_t
 spidev_sync_write(struct spidev_data *spidev, size_t len)
 {
        struct spi_transfer     t = {
-                       .tx_buf         = spidev->buffer,
+                       .tx_buf         = spidev->tx_buffer,
                        .len            = len,
                };
        struct spi_message      m;
@@ -149,7 +150,7 @@ static inline ssize_t
 spidev_sync_read(struct spidev_data *spidev, size_t len)
 {
        struct spi_transfer     t = {
-                       .rx_buf         = spidev->buffer,
+                       .rx_buf         = spidev->rx_buffer,
                        .len            = len,
                };
        struct spi_message      m;
@@ -179,7 +180,7 @@ spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
        if (status > 0) {
                unsigned long   missing;
 
-               missing = copy_to_user(buf, spidev->buffer, status);
+               missing = copy_to_user(buf, spidev->rx_buffer, status);
                if (missing == status)
                        status = -EFAULT;
                else
@@ -206,7 +207,7 @@ spidev_write(struct file *filp, const char __user *buf,
        spidev = filp->private_data;
 
        mutex_lock(&spidev->buf_lock);
-       missing = copy_from_user(spidev->buffer, buf, count);
+       missing = copy_from_user(spidev->tx_buffer, buf, count);
        if (missing == 0)
                status = spidev_sync_write(spidev, count);
        else
@@ -224,7 +225,7 @@ static int spidev_message(struct spidev_data *spidev,
        struct spi_transfer     *k_tmp;
        struct spi_ioc_transfer *u_tmp;
        unsigned                n, total;
-       u8                      *buf;
+       u8                      *tx_buf, *rx_buf;
        int                     status = -EFAULT;
 
        spi_message_init(&msg);
@@ -236,7 +237,8 @@ static int spidev_message(struct spidev_data *spidev,
         * We walk the array of user-provided transfers, using each one
         * to initialize a kernel version of the same transfer.
         */
-       buf = spidev->buffer;
+       tx_buf = spidev->tx_buffer;
+       rx_buf = spidev->rx_buffer;
        total = 0;
        for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
                        n;
@@ -250,20 +252,21 @@ static int spidev_message(struct spidev_data *spidev,
                }
 
                if (u_tmp->rx_buf) {
-                       k_tmp->rx_buf = buf;
+                       k_tmp->rx_buf = rx_buf;
                        if (!access_ok(VERIFY_WRITE, (u8 __user *)
                                                (uintptr_t) u_tmp->rx_buf,
                                                u_tmp->len))
                                goto done;
                }
                if (u_tmp->tx_buf) {
-                       k_tmp->tx_buf = buf;
-                       if (copy_from_user(buf, (const u8 __user *)
+                       k_tmp->tx_buf = tx_buf;
+                       if (copy_from_user(tx_buf, (const u8 __user *)
                                                (uintptr_t) u_tmp->tx_buf,
                                        u_tmp->len))
                                goto done;
                }
-               buf += k_tmp->len;
+               tx_buf += k_tmp->len;
+               rx_buf += k_tmp->len;
 
                k_tmp->cs_change = !!u_tmp->cs_change;
                k_tmp->tx_nbits = u_tmp->tx_nbits;
@@ -290,17 +293,17 @@ static int spidev_message(struct spidev_data *spidev,
                goto done;
 
        /* copy any rx data out of bounce buffer */
-       buf = spidev->buffer;
+       rx_buf = spidev->rx_buffer;
        for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
                if (u_tmp->rx_buf) {
                        if (__copy_to_user((u8 __user *)
-                                       (uintptr_t) u_tmp->rx_buf, buf,
+                                       (uintptr_t) u_tmp->rx_buf, rx_buf,
                                        u_tmp->len)) {
                                status = -EFAULT;
                                goto done;
                        }
                }
-               buf += u_tmp->len;
+               rx_buf += u_tmp->len;
        }
        status = total;
 
@@ -508,22 +511,41 @@ static int spidev_open(struct inode *inode, struct file *filp)
                        break;
                }
        }
-       if (status == 0) {
-               if (!spidev->buffer) {
-                       spidev->buffer = kmalloc(bufsiz, GFP_KERNEL);
-                       if (!spidev->buffer) {
+
+       if (status) {
+               pr_debug("spidev: nothing for minor %d\n", iminor(inode));
+               goto err_find_dev;
+       }
+
+       if (!spidev->tx_buffer) {
+               spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+               if (!spidev->tx_buffer) {
                                dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                                status = -ENOMEM;
+                       goto err_find_dev;
                        }
                }
-               if (status == 0) {
-                       spidev->users++;
-                       filp->private_data = spidev;
-                       nonseekable_open(inode, filp);
+
+       if (!spidev->rx_buffer) {
+               spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+               if (!spidev->rx_buffer) {
+                       dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+                       status = -ENOMEM;
+                       goto err_alloc_rx_buf;
                }
-       } else
-               pr_debug("spidev: nothing for minor %d\n", iminor(inode));
+       }
+
+       spidev->users++;
+       filp->private_data = spidev;
+       nonseekable_open(inode, filp);
+
+       mutex_unlock(&device_list_lock);
+       return 0;
 
+err_alloc_rx_buf:
+       kfree(spidev->tx_buffer);
+       spidev->tx_buffer = NULL;
+err_find_dev:
        mutex_unlock(&device_list_lock);
        return status;
 }
@@ -542,8 +564,11 @@ static int spidev_release(struct inode *inode, struct file *filp)
        if (!spidev->users) {
                int             dofree;
 
-               kfree(spidev->buffer);
-               spidev->buffer = NULL;
+               kfree(spidev->tx_buffer);
+               spidev->tx_buffer = NULL;
+
+               kfree(spidev->rx_buffer);
+               spidev->rx_buffer = NULL;
 
                /* ... after we unbound from the underlying device? */
                spin_lock_irq(&spidev->spi_lock);
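With the bounce buffer split into tx_buffer and rx_buffer, a full-duplex transfer from user space still goes through the usual spidev ioctl; a minimal sketch (the device node and payload bytes are made up):

	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/spi/spidev.h>

	int main(void)
	{
		int fd = open("/dev/spidev0.0", O_RDWR);    /* hypothetical node */
		uint8_t tx[4] = { 0x9f, 0x00, 0x00, 0x00 }; /* arbitrary payload */
		uint8_t rx[4] = { 0 };
		struct spi_ioc_transfer tr = {
			.tx_buf = (unsigned long)tx,  /* copied into spidev->tx_buffer */
			.rx_buf = (unsigned long)rx,  /* filled back from spidev->rx_buffer */
			.len    = sizeof(tx),
		};

		ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
		return 0;
	}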
index 57b1d44acbfe3d0782e0960a70eefadcdf864b36..eb976ee3a02f30d6fd751c813318c18fbf026627 100644 (file)
@@ -448,8 +448,10 @@ static int __init fb_console_setup(char *this_opt)
                return 1;
 
        while ((options = strsep(&this_opt, ",")) != NULL) {
-               if (!strncmp(options, "font:", 5))
+               if (!strncmp(options, "font:", 5)) {
                        strlcpy(fontname, options + 5, sizeof(fontname));
+                       continue;
+               }
                
                if (!strncmp(options, "scrollback:", 11)) {
                        options += 11;
@@ -457,13 +459,9 @@ static int __init fb_console_setup(char *this_opt)
                                fbcon_softback_size = simple_strtoul(options, &options, 0);
                                if (*options == 'k' || *options == 'K') {
                                        fbcon_softback_size *= 1024;
-                                       options++;
                                }
-                               if (*options != ',')
-                                       return 1;
-                               options++;
-                       } else
-                               return 1;
+                       }
+                       continue;
                }
                
                if (!strncmp(options, "map:", 4)) {
@@ -478,8 +476,7 @@ static int __init fb_console_setup(char *this_opt)
 
                                fbcon_map_override();
                        }
-
-                       return 1;
+                       continue;
                }
 
                if (!strncmp(options, "vc:", 3)) {
@@ -491,7 +488,8 @@ static int __init fb_console_setup(char *this_opt)
                        if (*options++ == '-')
                                last_fb_vc = simple_strtoul(options, &options, 10) - 1;
                        fbcon_is_default = 0; 
-               }       
+                       continue;
+               }
 
                if (!strncmp(options, "rotate:", 7)) {
                        options += 7;
@@ -499,6 +497,7 @@ static int __init fb_console_setup(char *this_opt)
                                initial_rotation = simple_strtoul(options, &options, 0);
                        if (initial_rotation > 3)
                                initial_rotation = 0;
+                       continue;
                }
        }
        return 1;
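The option string parsed here is the fbcon= kernel parameter, e.g. (illustrative values):

	fbcon=font:VGA8x16,scrollback:128k,vc:2-6,rotate:1

With the continue statements each comma-separated option is now handled independently and the loop moves on to the next one, instead of some branches returning before the remaining options were seen.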
index 6e6aa704fe84a1e95bf041ece16dc6f41ec3cf03..517f565b65d760863a05577f60984300532f2f6d 100644 (file)
@@ -56,7 +56,7 @@ static int cursor_size_lastfrom;
 static int cursor_size_lastto;
 static u32 vgacon_xres;
 static u32 vgacon_yres;
-static struct vgastate state;
+static struct vgastate vgastate;
 
 #define BLANK 0x0020
 
@@ -400,7 +400,7 @@ static const char *vgacon_startup(void)
 
        vga_video_num_lines = screen_info.orig_video_lines;
        vga_video_num_columns = screen_info.orig_video_cols;
-       state.vgabase = NULL;
+       vgastate.vgabase = NULL;
 
        if (screen_info.orig_video_mode == 7) {
                /* Monochrome display */
@@ -851,12 +851,12 @@ static void vga_set_palette(struct vc_data *vc, unsigned char *table)
 {
        int i, j;
 
-       vga_w(state.vgabase, VGA_PEL_MSK, 0xff);
+       vga_w(vgastate.vgabase, VGA_PEL_MSK, 0xff);
        for (i = j = 0; i < 16; i++) {
-               vga_w(state.vgabase, VGA_PEL_IW, table[i]);
-               vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2);
-               vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2);
-               vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2);
+               vga_w(vgastate.vgabase, VGA_PEL_IW, table[i]);
+               vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2);
+               vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2);
+               vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2);
        }
 }
 
@@ -1008,7 +1008,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
        switch (blank) {
        case 0:         /* Unblank */
                if (vga_vesa_blanked) {
-                       vga_vesa_unblank(&state);
+                       vga_vesa_unblank(&vgastate);
                        vga_vesa_blanked = 0;
                }
                if (vga_palette_blanked) {
@@ -1022,7 +1022,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
        case 1:         /* Normal blanking */
        case -1:        /* Obsolete */
                if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) {
-                       vga_pal_blank(&state);
+                       vga_pal_blank(&vgastate);
                        vga_palette_blanked = 1;
                        return 0;
                }
@@ -1034,7 +1034,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
                return 1;
        default:                /* VESA blanking */
                if (vga_video_type == VIDEO_TYPE_VGAC) {
-                       vga_vesa_blank(&state, blank - 1);
+                       vga_vesa_blank(&vgastate, blank - 1);
                        vga_vesa_blanked = blank;
                }
                return 0;
@@ -1280,7 +1280,7 @@ static int vgacon_font_set(struct vc_data *c, struct console_font *font, unsigne
            (charcount != 256 && charcount != 512))
                return -EINVAL;
 
-       rc = vgacon_do_font_op(&state, font->data, 1, charcount == 512);
+       rc = vgacon_do_font_op(&vgastate, font->data, 1, charcount == 512);
        if (rc)
                return rc;
 
@@ -1299,7 +1299,7 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
        font->charcount = vga_512_chars ? 512 : 256;
        if (!font->data)
                return 0;
-       return vgacon_do_font_op(&state, font->data, 0, vga_512_chars);
+       return vgacon_do_font_op(&vgastate, font->data, 0, vga_512_chars);
 }
 
 #else
index 3bf403150a2d6fbfcf9814df777e1ab51853c72b..9ec81d46fc5785d45a31e159b498513f62139188 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/regulator/consumer.h>
 #include <video/videomode.h>
 
-#include <mach/cpu.h>
 #include <asm/gpio.h>
 
 #include <video/atmel_lcdc.h>
index 5ee3b5505f7fb3d53aff30d0e79a146011df9609..91921665b98b6c3932f7bd6829972e0093569955 100644 (file)
@@ -301,6 +301,8 @@ static const struct of_device_id tvc_of_match[] = {
        {},
 };
 
+MODULE_DEVICE_TABLE(of, tvc_of_match);
+
 static struct platform_driver tvc_connector_driver = {
        .probe  = tvc_probe,
        .remove = __exit_p(tvc_remove),
@@ -308,6 +310,7 @@ static struct platform_driver tvc_connector_driver = {
                .name   = "connector-analog-tv",
                .owner  = THIS_MODULE,
                .of_match_table = tvc_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index 74de2bc50c4feca760cb9b0928a24eb288108610..2dfb6e5ff0cc135b32055a2fb5e9a8d6abad29c4 100644 (file)
@@ -391,6 +391,7 @@ static struct platform_driver dvi_connector_driver = {
                .name   = "connector-dvi",
                .owner  = THIS_MODULE,
                .of_match_table = dvic_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index 131c6e26089802f4d01feadff8213d4d6dfb1aa4..7b25967a91ebc6235c170e7925ddb229ed8b34df 100644 (file)
@@ -437,6 +437,7 @@ static struct platform_driver hdmi_connector_driver = {
                .name   = "connector-hdmi",
                .owner  = THIS_MODULE,
                .of_match_table = hdmic_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index b4e9a42a79e61cc58d9dc0abd50811e0db694d48..47ee7cdee1c5881a727f97d1863de1f26d6efcc8 100644 (file)
@@ -298,6 +298,7 @@ static struct platform_driver tfp410_driver = {
                .name   = "tfp410",
                .owner  = THIS_MODULE,
                .of_match_table = tfp410_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index c891d8f84cb2706b3fe64777bd2629889fcccb5a..c4abd56dd84697e306e649ff0365fb4504714812 100644 (file)
@@ -461,6 +461,7 @@ static struct platform_driver tpd_driver = {
                .name   = "tpd12s015",
                .owner  = THIS_MODULE,
                .of_match_table = tpd_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index 3636b61dc9b4296d0255f89daee0cde1e0860f0c..a9c3dcf0f6b59cefaafcdd3e724062b9e8e6485a 100644 (file)
@@ -327,6 +327,7 @@ static struct platform_driver panel_dpi_driver = {
                .name = "panel-dpi",
                .owner = THIS_MODULE,
                .of_match_table = panel_dpi_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index d6f14e8717e89e87d978131f3b05d8696536a02a..899cb1ab523df05ca6205ddcfe96172bd7559abc 100644 (file)
@@ -1378,6 +1378,7 @@ static struct platform_driver dsicm_driver = {
                .name = "panel-dsi-cm",
                .owner = THIS_MODULE,
                .of_match_table = dsicm_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index cc5b5124e0b4b05409c8ef11a4676bc08992b126..27d4fcfa1824313a7e3799f37b8c7a0deb6c89e7 100644 (file)
@@ -394,6 +394,7 @@ static struct spi_driver lb035q02_spi_driver = {
                .name   = "panel_lgphilips_lb035q02",
                .owner  = THIS_MODULE,
                .of_match_table = lb035q02_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index 3595f111aa350c89896b01d1cccfcc89af9740be..ccf3f4f3c70355580ee62dabaf62b6c09e7d4f5c 100644 (file)
@@ -424,6 +424,7 @@ static struct spi_driver nec_8048_driver = {
                .owner  = THIS_MODULE,
                .pm     = NEC_8048_PM_OPS,
                .of_match_table = nec_8048_of_match,
+               .suppress_bind_attrs = true,
        },
        .probe  = nec_8048_probe,
        .remove = nec_8048_remove,
index f1f72ce50a1705b3f6113140b3900226ea834e10..234142cc37641ad10f668118df65f60b2deb04b1 100644 (file)
@@ -410,6 +410,7 @@ static struct platform_driver sharp_ls_driver = {
                .name = "panel-sharp-ls037v7dw01",
                .owner = THIS_MODULE,
                .of_match_table = sharp_ls_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index 617f8d2f51273bc48fc474fca9013f5aa7764a5d..337ccc5c0f5eca846b62ba43b053d0f40f1cf335 100644 (file)
@@ -904,6 +904,7 @@ static struct spi_driver acx565akm_driver = {
                .name   = "acx565akm",
                .owner  = THIS_MODULE,
                .of_match_table = acx565akm_of_match,
+               .suppress_bind_attrs = true,
        },
        .probe  = acx565akm_probe,
        .remove = acx565akm_remove,
index 728808bcceebc6545161cfa7925b60a0106d0fe7..fbba0b8ca8718abcb08f7bc8b03e4947e93dc9ba 100644 (file)
@@ -500,6 +500,7 @@ static struct spi_driver td028ttec1_spi_driver = {
                .name   = "panel-tpo-td028ttec1",
                .owner  = THIS_MODULE,
                .of_match_table = td028ttec1_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index de78ab0caaa851ca19fcd589a251f89e4804915d..5aba76bca25a03bfab66dcfe24c485fcf07982fd 100644 (file)
@@ -673,6 +673,7 @@ static struct spi_driver tpo_td043_spi_driver = {
                .owner  = THIS_MODULE,
                .pm     = &tpo_td043_spi_pm,
                .of_match_table = tpo_td043_of_match,
+               .suppress_bind_attrs = true,
        },
        .probe  = tpo_td043_probe,
        .remove = tpo_td043_remove,
index 0a0b084ce65d626ca7619f81065b914d4286bb89..663ccc3bf4e5cebb63ef05250a1e5d9d8edeaa07 100644 (file)
@@ -1132,6 +1132,8 @@ static void dss_mgr_disable_compat(struct omap_overlay_manager *mgr)
        if (!mp->enabled)
                goto out;
 
+       wait_pending_extra_info_updates();
+
        if (!mgr_manual_update(mgr))
                dispc_mgr_disable_sync(mgr->id);
 
index be053aa80880d0bb1cfcc8f3fb6d908e9f7be80d..0e9a74bb9fc2d8c1cbd2de9ca73b20f94e5f2199 100644 (file)
@@ -3290,8 +3290,11 @@ static void dispc_dump_regs(struct seq_file *s)
                DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS);
                DUMPREG(i, DISPC_OVL_ROW_INC);
                DUMPREG(i, DISPC_OVL_PIXEL_INC);
+
                if (dss_has_feature(FEAT_PRELOAD))
                        DUMPREG(i, DISPC_OVL_PRELOAD);
+               if (dss_has_feature(FEAT_MFLAG))
+                       DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
 
                if (i == OMAP_DSS_GFX) {
                        DUMPREG(i, DISPC_OVL_WINDOW_SKIP);
@@ -3312,10 +3315,6 @@ static void dispc_dump_regs(struct seq_file *s)
                }
                if (dss_has_feature(FEAT_ATTR2))
                        DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
-               if (dss_has_feature(FEAT_PRELOAD))
-                       DUMPREG(i, DISPC_OVL_PRELOAD);
-               if (dss_has_feature(FEAT_MFLAG))
-                       DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
        }
 
 #undef DISPC_REG
@@ -3843,6 +3842,7 @@ static struct platform_driver omap_dispchw_driver = {
                .owner  = THIS_MODULE,
                .pm     = &dispc_pm_ops,
                .of_match_table = dispc_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index 78edb449c763bf3efc448c3d95c4bd459c3c9002..3043d6e0a5f93c465f7da73b96b308632a4c6c5f 100644 (file)
                                        DISPC_FIR_COEF_V2_OFFSET(n, i))
 #define DISPC_OVL_PRELOAD(n)           (DISPC_OVL_BASE(n) + \
                                        DISPC_PRELOAD_OFFSET(n))
-#define DISPC_OVL_MFLAG_THRESHOLD(n)   (DISPC_OVL_BASE(n) + \
-                                       DISPC_MFLAG_THRESHOLD_OFFSET(n))
+#define DISPC_OVL_MFLAG_THRESHOLD(n)   DISPC_MFLAG_THRESHOLD_OFFSET(n)
 
 /* DISPC up/downsampling FIR filter coefficient structure */
 struct dispc_coef {
index 9368972d696210fbc7ff8d3f91b787b275fd6be2..4a3363dae74aa7479722732102ab4e22ca28fbce 100644 (file)
@@ -720,6 +720,7 @@ static struct platform_driver omap_dpi_driver = {
        .driver         = {
                .name   = "omapdss_dpi",
                .owner  = THIS_MODULE,
+               .suppress_bind_attrs = true,
        },
 };
 
index b6f6ae1d4664e9c8045f5556af19fa6479d0b0d9..0793bc67a275864ece56a4a8605553c5535704ea 100644 (file)
@@ -1603,7 +1603,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
        } else if (dss_has_feature(FEAT_DSI_PLL_SELFREQDCO)) {
                f = cinfo->clkin4ddr < 1000000000 ? 0x2 : 0x4;
 
-               l = FLD_MOD(l, f, 4, 1);        /* PLL_SELFREQDCO */
+               l = FLD_MOD(l, f, 3, 1);        /* PLL_SELFREQDCO */
        }
 
        l = FLD_MOD(l, 1, 13, 13);              /* DSI_PLL_REFEN */
@@ -5754,6 +5754,7 @@ static struct platform_driver omap_dsihw_driver = {
                .owner  = THIS_MODULE,
                .pm     = &dsi_pm_ops,
                .of_match_table = dsi_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index 6daeb7ed44c685cd09d2831664c37b2f4071d29f..14bcd6c43f726d6c9e6c88baa2880f80bf8c97b5 100644 (file)
@@ -966,6 +966,7 @@ static struct platform_driver omap_dsshw_driver = {
                .owner  = THIS_MODULE,
                .pm     = &dss_pm_ops,
                .of_match_table = dss_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index 6a8550cf43e51b4dc148075dceece5aeab181f94..9a8713ca090c01561548a4e96d37cfd6fb5fb1db 100644 (file)
@@ -781,6 +781,7 @@ static struct platform_driver omapdss_hdmihw_driver = {
                .owner  = THIS_MODULE,
                .pm     = &hdmi_pm_ops,
                .of_match_table = hdmi_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index 32d02ec34d2322cdde42481a45ef7801c5d3a90d..169b764bb9d48f6cb188d075a740e0cd4da4dbb3 100644 (file)
@@ -806,6 +806,7 @@ static struct platform_driver omapdss_hdmihw_driver = {
                .owner  = THIS_MODULE,
                .pm     = &hdmi_pm_ops,
                .of_match_table = hdmi_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index 54df12a8d744a1b12dc11de98dbf227294ef128c..6d92bb32fe5192e90333b8a38b0db4fa95f341e9 100644 (file)
@@ -124,16 +124,15 @@ static int hdmi_pll_config(struct hdmi_pll_data *pll)
        r = FLD_MOD(r, 0x0, 14, 14);    /* PHY_CLKINEN de-assert during locking */
        r = FLD_MOD(r, fmt->refsel, 22, 21);    /* REFSEL */
 
-       if (fmt->dcofreq) {
-               /* divider programming for frequency beyond 1000Mhz */
-               REG_FLD_MOD(pll->base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
+       if (fmt->dcofreq)
                r = FLD_MOD(r, 0x4, 3, 1);      /* 1000MHz and 2000MHz */
-       } else {
+       else
                r = FLD_MOD(r, 0x2, 3, 1);      /* 500MHz and 1000MHz */
-       }
 
        hdmi_write_reg(pll->base, PLLCTRL_CFG2, r);
 
+       REG_FLD_MOD(pll->base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
+
        r = hdmi_read_reg(pll->base, PLLCTRL_CFG4);
        r = FLD_MOD(r, fmt->regm2, 24, 18);
        r = FLD_MOD(r, fmt->regmf, 17, 0);
@@ -144,8 +143,8 @@ static int hdmi_pll_config(struct hdmi_pll_data *pll)
 
        /* wait for bit change */
        if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_GO,
-                       0, 0, 1) != 1) {
-               DSSERR("PLL GO bit not set\n");
+                       0, 0, 0) != 0) {
+               DSSERR("PLL GO bit not clearing\n");
                return -ETIMEDOUT;
        }
 
index c8a81a2b879cd9ea37e4cae59cf1a88e432e4b3f..878273f58839bb84cd72c24be915a21e9d22e2ff 100644 (file)
@@ -1044,6 +1044,7 @@ static struct platform_driver omap_rfbihw_driver = {
                .name   = "omapdss_rfbi",
                .owner  = THIS_MODULE,
                .pm     = &rfbi_pm_ops,
+               .suppress_bind_attrs = true,
        },
 };
 
index 911dcc9173a6b1e7f0b8e8ae508d8d0ec4bd652b..4c9c46d4ea60dbe2752c11f1933af4788ff41559 100644 (file)
@@ -377,6 +377,7 @@ static struct platform_driver omap_sdi_driver = {
        .driver         = {
                .name   = "omapdss_sdi",
                .owner  = THIS_MODULE,
+               .suppress_bind_attrs = true,
        },
 };
 
index 21d81113962bb81df71783be6f21ac4b7ef9e7b1..d077d8a75ddc8aeecd03d416ee1c080efed09145 100644 (file)
@@ -966,6 +966,7 @@ static struct platform_driver omap_venchw_driver = {
                .owner  = THIS_MODULE,
                .pm     = &venc_pm_ops,
                .of_match_table = venc_of_match,
+               .suppress_bind_attrs = true,
        },
 };
 
index 15872433e0c6a9ae00a209efe181dc3a1bea3a8c..ce8a705707562a4fb2bb91db8a6aec0ea2167f4e 100644 (file)
@@ -1833,14 +1833,13 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
        if (fbdev == NULL)
                return;
 
-       for (i = 0; i < fbdev->num_fbs; i++) {
-               struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
-               int j;
+       for (i = 0; i < fbdev->num_overlays; i++) {
+               struct omap_overlay *ovl = fbdev->overlays[i];
 
-               for (j = 0; j < ofbi->num_overlays; j++) {
-                       struct omap_overlay *ovl = ofbi->overlays[j];
-                       ovl->disable(ovl);
-               }
+               ovl->disable(ovl);
+
+               if (ovl->manager)
+                       ovl->unset_manager(ovl);
        }
 
        for (i = 0; i < fbdev->num_fbs; i++)
@@ -2619,7 +2618,7 @@ err0:
        return r;
 }
 
-static int __exit omapfb_remove(struct platform_device *pdev)
+static int omapfb_remove(struct platform_device *pdev)
 {
        struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
 
@@ -2636,7 +2635,7 @@ static int __exit omapfb_remove(struct platform_device *pdev)
 
 static struct platform_driver omapfb_driver = {
        .probe          = omapfb_probe,
-       .remove         = __exit_p(omapfb_remove),
+       .remove         = omapfb_remove,
        .driver         = {
                .name   = "omapfb",
                .owner  = THIS_MODULE,
@@ -2651,6 +2650,7 @@ module_param_named(mirror, def_mirror, bool, 0);
 
 module_platform_driver(omapfb_driver);
 
+MODULE_ALIAS("platform:omapfb");
 MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
 MODULE_DESCRIPTION("OMAP2/3 Framebuffer");
 MODULE_LICENSE("GPL v2");
index 6c48f20eddd4b60256c0e825e53dbbf724d115cb..20805db2c98774a8cbe3242d37c63f370149f1a9 100644 (file)
@@ -128,21 +128,15 @@ __clear_page_buffers(struct page *page)
        page_cache_release(page);
 }
 
-
-static int quiet_error(struct buffer_head *bh)
-{
-       if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
-               return 0;
-       return 1;
-}
-
-
-static void buffer_io_error(struct buffer_head *bh)
+static void buffer_io_error(struct buffer_head *bh, char *msg)
 {
        char b[BDEVNAME_SIZE];
-       printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
+
+       if (!test_bit(BH_Quiet, &bh->b_state))
+               printk_ratelimited(KERN_ERR
+                       "Buffer I/O error on dev %s, logical block %llu%s\n",
                        bdevname(bh->b_bdev, b),
-                       (unsigned long long)bh->b_blocknr);
+                       (unsigned long long)bh->b_blocknr, msg);
 }
 
 /*
@@ -177,17 +171,10 @@ EXPORT_SYMBOL(end_buffer_read_sync);
 
 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
-       char b[BDEVNAME_SIZE];
-
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
-               if (!quiet_error(bh)) {
-                       buffer_io_error(bh);
-                       printk(KERN_WARNING "lost page write due to "
-                                       "I/O error on %s\n",
-                                      bdevname(bh->b_bdev, b));
-               }
+               buffer_io_error(bh, ", lost sync page write");
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
@@ -304,8 +291,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
-               if (!quiet_error(bh))
-                       buffer_io_error(bh);
+               buffer_io_error(bh, ", async page read");
                SetPageError(page);
        }
 
@@ -353,7 +339,6 @@ still_busy:
  */
 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
-       char b[BDEVNAME_SIZE];
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
@@ -365,12 +350,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
-               if (!quiet_error(bh)) {
-                       buffer_io_error(bh);
-                       printk(KERN_WARNING "lost page write due to "
-                                       "I/O error on %s\n",
-                              bdevname(bh->b_bdev, b));
-               }
+               buffer_io_error(bh, ", lost async page write");
                set_bit(AS_EIO, &page->mapping->flags);
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
index 7015db0bafd1b0c3179e6a9eb8de8c1e06465751..eb742d0e67ff75f159b67d3d0cac6e28b1932cf9 100644 (file)
@@ -1354,13 +1354,6 @@ set_qf_format:
                                        "not specified.");
                        return 0;
                }
-       } else {
-               if (sbi->s_jquota_fmt) {
-                       ext3_msg(sb, KERN_ERR, "error: journaled quota format "
-                                       "specified with no journaling "
-                                       "enabled.");
-                       return 0;
-               }
        }
 #endif
        return 1;
index 37043d0b2be8f034f6936d21594a20ae4053f3f8..0b16fb4c06d3e176e367bced078d014542a97b2a 100644 (file)
@@ -3603,11 +3603,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                }
        }
 
-       allocated = ext4_split_extent(handle, inode, ppath,
-                                     &split_map, split_flag, flags);
-       if (allocated < 0)
-               err = allocated;
-
+       err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
+                               flags);
+       if (err > 0)
+               err = 0;
 out:
        /* If we have gotten a failure, don't zero out status tree */
        if (!err)
index aca7b24a443243c3415221edd082c71e2a1a2ce5..8131be8c0af3166aac865557baa9f0371564a397 100644 (file)
@@ -137,10 +137,10 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                        iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
        }
 
+       iocb->private = &overwrite;
        if (o_direct) {
                blk_start_plug(&plug);
 
-               iocb->private = &overwrite;
 
                /* check whether we do a DIO overwrite or not */
                if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
index 8012a5daf4019d953d3b3d79d5d2a2b4d41ac59f..ac644c31ca67472f3a16f91c6107e3d06772f66f 100644 (file)
@@ -887,6 +887,10 @@ got:
                struct buffer_head *block_bitmap_bh;
 
                block_bitmap_bh = ext4_read_block_bitmap(sb, group);
+               if (!block_bitmap_bh) {
+                       err = -EIO;
+                       goto out;
+               }
                BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
                err = ext4_journal_get_write_access(handle, block_bitmap_bh);
                if (err) {
index e9777f93cf05a2e9c9776c9a57fa2798b14ab703..3356ab5395f469c0db1eaa8da1cb79ed58277e93 100644 (file)
@@ -4959,7 +4959,12 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
        if (val)
                ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
        else {
-               jbd2_journal_flush(journal);
+               err = jbd2_journal_flush(journal);
+               if (err < 0) {
+                       jbd2_journal_unlock_updates(journal);
+                       ext4_inode_resume_unlocked_dio(inode);
+                       return err;
+               }
                ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
        }
        ext4_set_aops(inode);
index 123798c5ac314c3ff7522c84de2907c6e7587800..426211882f7274b496c5c94ecac30fbd18a57325 100644 (file)
@@ -1816,31 +1816,39 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
                hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
        hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
        ext4fs_dirhash(name, namelen, &hinfo);
+       memset(frames, 0, sizeof(frames));
        frame = frames;
        frame->entries = entries;
        frame->at = entries;
        frame->bh = bh;
        bh = bh2;
 
-       ext4_handle_dirty_dx_node(handle, dir, frame->bh);
-       ext4_handle_dirty_dirent_node(handle, dir, bh);
+       retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
+       if (retval)
+               goto out_frames;        
+       retval = ext4_handle_dirty_dirent_node(handle, dir, bh);
+       if (retval)
+               goto out_frames;        
 
        de = do_split(handle,dir, &bh, frame, &hinfo);
        if (IS_ERR(de)) {
-               /*
-                * Even if the block split failed, we have to properly write
-                * out all the changes we did so far. Otherwise we can end up
-                * with corrupted filesystem.
-                */
-               ext4_mark_inode_dirty(handle, dir);
-               dx_release(frames);
-               return PTR_ERR(de);
+               retval = PTR_ERR(de);
+               goto out_frames;
        }
        dx_release(frames);
 
        retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
        brelse(bh);
        return retval;
+out_frames:
+       /*
+        * Even if the block split failed, we have to properly write
+        * out all the changes we did so far. Otherwise we can end up
+        * with corrupted filesystem.
+        */
+       ext4_mark_inode_dirty(handle, dir);
+       dx_release(frames);
+       return retval;
 }
 
 /*
index f298c60f907d9ec559186fe4e9f2026730346fde..ca4588388fc30bca45887f0f9af9fb8422d9af7f 100644 (file)
@@ -1081,7 +1081,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
                        break;
 
                if (meta_bg == 0)
-                       backup_block = group * bpg + blk_off;
+                       backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
                else
                        backup_block = (ext4_group_first_block_no(sb, group) +
                                        ext4_bg_has_super(sb, group));
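
The one-line resize fix above widens the group number before the multiply: with 32-bit operands, group * bpg wraps long before the result is stored in the 64-bit backup_block. A standalone illustration of the difference (the numbers are made up):

/* illustrative only: why the (ext4_fsblk_t) cast matters -- with
 * 32-bit operands the multiply wraps before it is widened to 64 bits */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t group = 200000;        /* hypothetical large group number */
        uint32_t bpg = 32768;           /* blocks per group */
        uint32_t blk_off = 1;

        uint64_t wrong = group * bpg + blk_off;           /* wraps at 2^32 */
        uint64_t right = (uint64_t)group * bpg + blk_off; /* widened first */

        printf("wrong = %llu\nright = %llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}
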
index 1eda6ab0ef9d3a3be99b89242127850c73630ff3..2c9e6864abd932f007d107c65615ad440cd56f2c 100644 (file)
@@ -3526,6 +3526,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
        set_opt(sb, POSIX_ACL);
 #endif
+       /* don't forget to enable journal_csum when metadata_csum is enabled. */
+       if (ext4_has_metadata_csum(sb))
+               set_opt(sb, JOURNAL_CHECKSUM);
+
        if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
                set_opt(sb, JOURNAL_DATA);
        else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
@@ -3943,7 +3947,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) &&
            !(sb->s_flags & MS_RDONLY))
                if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
-                       goto failed_mount3;
+                       goto failed_mount3a;
 
        /*
         * The first inode we look at is the journal inode.  Don't try
@@ -3952,7 +3956,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        if (!test_opt(sb, NOLOAD) &&
            EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
                if (ext4_load_journal(sb, es, journal_devnum))
-                       goto failed_mount3;
+                       goto failed_mount3a;
        } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
              EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
                ext4_msg(sb, KERN_ERR, "required journal recovery "
@@ -4240,6 +4244,7 @@ failed_mount_wq:
                jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
        }
+failed_mount3a:
        ext4_es_unregister_shrinker(sbi);
 failed_mount3:
        del_timer_sync(&sbi->s_err_report);
@@ -4841,6 +4846,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                goto restore_opts;
        }
 
+       if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
+           test_opt(sb, JOURNAL_CHECKSUM)) {
+               ext4_msg(sb, KERN_ERR, "changing journal_checksum "
+                        "during remount not supported");
+               err = -EINVAL;
+               goto restore_opts;
+       }
+
        if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
                if (test_opt2(sb, EXPLICIT_DELALLOC)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
index 8898bbd2b61ee477b4f5168e9840c3a6d9048198..dcead636c33b5e5f3ef6b917966f44e007ce95e2 100644 (file)
@@ -93,6 +93,7 @@
 #include <linux/bio.h>
 #endif
 #include <linux/log2.h>
+#include <linux/hash.h>
 
 static struct kmem_cache *revoke_record_cache;
 static struct kmem_cache *revoke_table_cache;
@@ -129,15 +130,11 @@ static void flush_descriptor(journal_t *, struct journal_head *, int, int);
 
 /* Utility functions to maintain the revoke table */
 
-/* Borrowed from buffer.c: this is a tried and tested block hash function */
 static inline int hash(journal_t *journal, unsigned int block)
 {
        struct jbd_revoke_table_s *table = journal->j_revoke;
-       int hash_shift = table->hash_shift;
 
-       return ((block << (hash_shift - 6)) ^
-               (block >> 13) ^
-               (block << (hash_shift - 12))) & (table->hash_size - 1);
+       return hash_32(block, table->hash_shift);
 }
 
 static int insert_revoke_hash(journal_t *journal, unsigned int blocknr,
index d5e95a175c9244a24f26b52b4b9755ceee6f1400..c6cbaef2bda1498d8f2e00eeca30a40c8a462361 100644 (file)
@@ -92,6 +92,7 @@
 #include <linux/init.h>
 #include <linux/bio.h>
 #include <linux/log2.h>
+#include <linux/hash.h>
 #endif
 
 static struct kmem_cache *jbd2_revoke_record_cache;
@@ -130,16 +131,9 @@ static void flush_descriptor(journal_t *, struct buffer_head *, int, int);
 
 /* Utility functions to maintain the revoke table */
 
-/* Borrowed from buffer.c: this is a tried and tested block hash function */
 static inline int hash(journal_t *journal, unsigned long long block)
 {
-       struct jbd2_revoke_table_s *table = journal->j_revoke;
-       int hash_shift = table->hash_shift;
-       int hash = (int)block ^ (int)((block >> 31) >> 1);
-
-       return ((hash << (hash_shift - 6)) ^
-               (hash >> 13) ^
-               (hash << (hash_shift - 12))) & (table->hash_size - 1);
+       return hash_64(block, journal->j_revoke->hash_shift);
 }
 
 static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr,
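
Both revoke-table hunks above (jbd and jbd2) replace a hand-rolled block hash with the generic hash_32()/hash_64() helpers from <linux/hash.h>, which fold a block number down to table->hash_shift bits. A minimal userspace sketch of the multiplicative-hash idea behind those helpers; the constant is a stand-in, not necessarily the kernel's value:

/* sketch of multiplicative hashing into 2^bits buckets; illustrative,
 * not the kernel's hash_32() implementation */
#include <stdint.h>
#include <stdio.h>

static uint32_t hash32_to_bits(uint32_t val, unsigned int bits)
{
        /* multiply by a large odd constant, keep the top 'bits' bits */
        return (uint32_t)(val * 0x9e370001u) >> (32 - bits);
}

int main(void)
{
        unsigned int hash_shift = 8;            /* 256-bucket revoke table */
        unsigned long long block = 123456789ULL;

        /* bucket index for this block, analogous to hash(journal, block) */
        printf("bucket = %u\n",
               (unsigned)hash32_to_bits((uint32_t)block, hash_shift));
        return 0;
}
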
index 42df664e95e54a2148093c14fe1f1ce43b909f33..78512898d3ba6810b36a9e5128b8a5c84e547a91 100644 (file)
@@ -3154,7 +3154,8 @@ static int do_tmpfile(int dfd, struct filename *pathname,
        if (error)
                goto out2;
        audit_inode(pathname, nd->path.dentry, 0);
-       error = may_open(&nd->path, op->acc_mode, op->open_flag);
+       /* Don't check for other permissions, the inode was just created */
+       error = may_open(&nd->path, MAY_OPEN, op->open_flag);
        if (error)
                goto out2;
        file->f_path.mnt = nd->path.mnt;
index cdeb3cfd6f32b2d412c8478283c8ffc732d6dc06..0beb023f25ace63a8b4ec270eb69bdbaa3e19cbe 100644 (file)
@@ -1272,7 +1272,8 @@ static bool need_wrongsec_check(struct svc_rqst *rqstp)
         */
        if (argp->opcnt == resp->opcnt)
                return false;
-
+       if (next->opnum == OP_ILLEGAL)
+               return false;
        nextd = OPDESC(next);
        /*
         * Rest of 2.6.3.1.1: certain operations will return WRONGSEC
@@ -1589,7 +1590,8 @@ static inline u32 nfsd4_rename_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op
 static inline u32 nfsd4_sequence_rsize(struct svc_rqst *rqstp,
                                       struct nfsd4_op *op)
 {
-       return NFS4_MAX_SESSIONID_LEN + 20;
+       return (op_encode_hdr_size
+               + XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) * sizeof(__be32);
 }
 
 static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
@@ -1893,6 +1895,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
                .op_func = (nfsd4op_func)nfsd4_sequence,
                .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
                .op_name = "OP_SEQUENCE",
+               .op_rsize_bop = (nfsd4op_rsize)nfsd4_sequence_rsize,
        },
        [OP_DESTROY_CLIENTID] = {
                .op_func = (nfsd4op_func)nfsd4_destroy_clientid,
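
The nfsd4_sequence_rsize() change above sizes the SEQUENCE reply in 4-byte XDR words instead of a raw byte guess. A rough sketch of that arithmetic; OP_ENCODE_HDR_SIZE is an assumed value here, and the trailing + 5 presumably covers the five 32-bit fields that follow the session id in the reply:

/* illustrative arithmetic only; OP_ENCODE_HDR_SIZE is an assumption,
 * NFS4_MAX_SESSIONID_LEN is the 16-byte session id */
#include <stdio.h>

#define XDR_QUADLEN(l)          (((l) + 3) >> 2)  /* bytes -> 4-byte words */
#define NFS4_MAX_SESSIONID_LEN  16
#define OP_ENCODE_HDR_SIZE      2                 /* assumed, in words */

int main(void)
{
        unsigned int words = OP_ENCODE_HDR_SIZE
                           + XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5;

        printf("SEQUENCE reply estimate: %u words = %u bytes\n",
               words, words * 4u);
        return 0;
}
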
index 9ce062218de9cf2559c02b92a7a201224d1d67fc..e8497144b32342437377748f26a616168455339a 100644 (file)
@@ -288,20 +288,25 @@ void fsnotify_unmount_inodes(struct list_head *list)
                spin_unlock(&inode->i_lock);
 
                /* In case the dropping of a reference would nuke next_i. */
-               if ((&next_i->i_sb_list != list) &&
-                   atomic_read(&next_i->i_count)) {
+               while (&next_i->i_sb_list != list) {
                        spin_lock(&next_i->i_lock);
-                       if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
+                       if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
+                                               atomic_read(&next_i->i_count)) {
                                __iget(next_i);
                                need_iput = next_i;
+                               spin_unlock(&next_i->i_lock);
+                               break;
                        }
                        spin_unlock(&next_i->i_lock);
+                       next_i = list_entry(next_i->i_sb_list.next,
+                                               struct inode, i_sb_list);
                }
 
                /*
-                * We can safely drop inode_sb_list_lock here because we hold
-                * references on both inode and next_i.  Also no new inodes
-                * will be added since the umount has begun.
+                * We can safely drop inode_sb_list_lock here because either
+                * we actually hold references on both inode and next_i or
+                * end of list.  Also no new inodes will be added since the
+                * umount has begun.
                 */
                spin_unlock(&inode_sb_list_lock);
 
index 8add6f1030d7c0d9afc9aa56c918eb9bc3a10659..b931e04e33889742a6192255b3bd95d8779203ea 100644 (file)
@@ -158,7 +158,7 @@ bail_add:
                 * NOTE: This dentry already has ->d_op set from
                 * ocfs2_get_parent() and ocfs2_get_dentry()
                 */
-               if (ret)
+               if (!IS_ERR_OR_NULL(ret))
                        dentry = ret;
 
                status = ocfs2_dentry_attach_lock(dentry, inode,
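
The ocfs2 fix above matters because the dentry returned there (from a helper such as d_splice_alias()) can be a real dentry, NULL, or an ERR_PTR-encoded error, and a bare if (ret) happily treats an error pointer as valid. A hedged userspace re-creation of the kernel's ERR_PTR convention, to show why IS_ERR_OR_NULL() is the right test:

/* illustrative re-creation of the kernel's ERR_PTR/IS_ERR convention:
 * small negative errnos live at the very top of the pointer range, so
 * a returned pointer can be valid, NULL, or an encoded error */
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)        { return (void *)error; }
static long  PTR_ERR(const void *ptr)   { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static int   IS_ERR_OR_NULL(const void *ptr)
{
        return !ptr || IS_ERR(ptr);
}

int main(void)
{
        int value = 42;
        void *ok  = &value;
        void *err = ERR_PTR(-2);                 /* e.g. -ENOENT */

        printf("ok:   err_or_null=%d\n", IS_ERR_OR_NULL(ok));
        printf("err:  err_or_null=%d (errno %ld)\n",
               IS_ERR_OR_NULL(err), PTR_ERR(err));
        printf("null: err_or_null=%d\n", IS_ERR_OR_NULL(NULL));
        return 0;
}
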
index 8b663b2d95622f98b704ed693b1177d13cfa79dd..6b4527216a7fe2c60ead15d57383648f25d4d3ef 100644 (file)
@@ -634,7 +634,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
                        dqstats_inc(DQST_LOOKUPS);
                        err = sb->dq_op->write_dquot(dquot);
                        if (!ret && err)
-                               err = ret;
+                               ret = err;
                        dqput(dquot);
                        spin_lock(&dq_list_lock);
                }
index 0207a78a8d82274546cb2436c2568a53559317f9..6cbee8395f603b91d89f8cee731d2ae3445d70ba 100644 (file)
@@ -1583,13 +1583,13 @@ static inline bool blk_integrity_merge_rq(struct request_queue *rq,
                                          struct request *r1,
                                          struct request *r2)
 {
-       return 0;
+       return true;
 }
 static inline bool blk_integrity_merge_bio(struct request_queue *rq,
                                           struct request *r,
                                           struct bio *b)
 {
-       return 0;
+       return true;
 }
 static inline bool blk_integrity_is_initialized(struct gendisk *g)
 {
index 2507fd2a1eb4f9d4971b9de5344e8f250570c012..d1a558239b1a189768d804c13b39617677cd93b4 100644 (file)
@@ -71,7 +71,6 @@
  *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
  *
  * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- * Fixed in GCC 4.8.2 and later versions.
  *
  * (asm goto is automatically volatile - the naming reflects this.)
  */
index cdd1cc202d51ef2c33654630023022284977ea8d..c8c565952548e6ca834425b51dce65c34e5255e7 100644 (file)
@@ -53,7 +53,6 @@
  *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
  *
  * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- * Fixed in GCC 4.8.2 and later versions.
  *
  * (asm goto is automatically volatile - the naming reflects this.)
  */
index 6b394f0b51485076b28ed9fc2136d854b5d1c677..eeb3079857155355161047eda5d2d298d7fd2a3f 100644 (file)
@@ -6,7 +6,8 @@
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern int __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);
+extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+                                     unsigned long vm_flags);
 
 #define khugepaged_enabled()                                          \
        (transparent_hugepage_flags &                                  \
@@ -35,13 +36,13 @@ static inline void khugepaged_exit(struct mm_struct *mm)
                __khugepaged_exit(mm);
 }
 
-static inline int khugepaged_enter(struct vm_area_struct *vma)
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+                                  unsigned long vm_flags)
 {
        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
                if ((khugepaged_always() ||
-                    (khugepaged_req_madv() &&
-                     vma->vm_flags & VM_HUGEPAGE)) &&
-                   !(vma->vm_flags & VM_NOHUGEPAGE))
+                    (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
+                   !(vm_flags & VM_NOHUGEPAGE))
                        if (__khugepaged_enter(vma->vm_mm))
                                return -ENOMEM;
        return 0;
@@ -54,11 +55,13 @@ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 static inline void khugepaged_exit(struct mm_struct *mm)
 {
 }
-static inline int khugepaged_enter(struct vm_area_struct *vma)
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+                                  unsigned long vm_flags)
 {
        return 0;
 }
-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+                                            unsigned long vm_flags)
 {
        return 0;
 }
index 19df5d857411a7cc567fa7571985bfdcd65c28b0..6b75640ef5ab553a4e0292940bacc466db1a6437 100644 (file)
@@ -139,48 +139,23 @@ static inline bool mem_cgroup_disabled(void)
        return false;
 }
 
-void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
-                                        unsigned long *flags);
-
-extern atomic_t memcg_moving;
-
-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
-                                       bool *locked, unsigned long *flags)
-{
-       if (mem_cgroup_disabled())
-               return;
-       rcu_read_lock();
-       *locked = false;
-       if (atomic_read(&memcg_moving))
-               __mem_cgroup_begin_update_page_stat(page, locked, flags);
-}
-
-void __mem_cgroup_end_update_page_stat(struct page *page,
-                               unsigned long *flags);
-static inline void mem_cgroup_end_update_page_stat(struct page *page,
-                                       bool *locked, unsigned long *flags)
-{
-       if (mem_cgroup_disabled())
-               return;
-       if (*locked)
-               __mem_cgroup_end_update_page_stat(page, flags);
-       rcu_read_unlock();
-}
-
-void mem_cgroup_update_page_stat(struct page *page,
-                                enum mem_cgroup_stat_index idx,
-                                int val);
-
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
+                                             unsigned long *flags);
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
+                             unsigned long flags);
+void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+                                enum mem_cgroup_stat_index idx, int val);
+
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
                                            enum mem_cgroup_stat_index idx)
 {
-       mem_cgroup_update_page_stat(page, idx, 1);
+       mem_cgroup_update_page_stat(memcg, idx, 1);
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
                                            enum mem_cgroup_stat_index idx)
 {
-       mem_cgroup_update_page_stat(page, idx, -1);
+       mem_cgroup_update_page_stat(memcg, idx, -1);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -315,13 +290,14 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
+static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
 {
+       return NULL;
 }
 
-static inline void mem_cgroup_end_update_page_stat(struct page *page,
-                                       bool *locked, unsigned long *flags)
+static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
+                                       bool locked, unsigned long flags)
 {
 }
 
@@ -343,12 +319,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
        return false;
 }
 
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
                                            enum mem_cgroup_stat_index idx)
 {
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
                                            enum mem_cgroup_stat_index idx)
 {
 }
index 27eb1bfbe7049adbaac4d90b8f2f77f95c9741bc..b46461116cd2c49333ee26767a4ff5af01dd8ffa 100644 (file)
@@ -1235,7 +1235,6 @@ int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
                                struct page *page);
 void account_page_dirtied(struct page *page, struct address_space *mapping);
-void account_page_writeback(struct page *page);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
index 5b5efae091350a839e5b27ff5c5b5b34406e9f29..ad2f6705437266373ff1bb73881df0a5da8f0d3b 100644 (file)
@@ -16,7 +16,7 @@ struct reserved_mem {
 };
 
 struct reserved_mem_ops {
-       void    (*device_init)(struct reserved_mem *rmem,
+       int     (*device_init)(struct reserved_mem *rmem,
                               struct device *dev);
        void    (*device_release)(struct reserved_mem *rmem,
                                  struct device *dev);
@@ -28,14 +28,17 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
        _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
 
 #ifdef CONFIG_OF_RESERVED_MEM
-void of_reserved_mem_device_init(struct device *dev);
+int of_reserved_mem_device_init(struct device *dev);
 void of_reserved_mem_device_release(struct device *dev);
 
 void fdt_init_reserved_mem(void);
 void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
                               phys_addr_t base, phys_addr_t size);
 #else
-static inline void of_reserved_mem_device_init(struct device *dev) { }
+static inline int of_reserved_mem_device_init(struct device *dev)
+{
+       return -ENOSYS;
+}
 static inline void of_reserved_mem_device_release(struct device *pdev) { }
 
 static inline void fdt_init_reserved_mem(void) { }
index d347c805f923f8539b6430fa30119527edb7f244..f540b1496e2f798f63c4cc1db0803f12f3e6e732 100644 (file)
@@ -35,6 +35,8 @@
 #ifndef __LINUX_REGULATOR_CONSUMER_H_
 #define __LINUX_REGULATOR_CONSUMER_H_
 
+#include <linux/err.h>
+
 struct device;
 struct notifier_block;
 struct regmap;
index a59d9343c25bdc9575eefa1ac361732599db9ddf..6c8b6f604e7609f2d320a7aa572a8ec6a870f4b8 100644 (file)
@@ -557,7 +557,9 @@ struct sk_buff {
        /* fields enclosed in headers_start/headers_end are copied
         * using a single memcpy() in __copy_skb_header()
         */
+       /* private: */
        __u32                   headers_start[0];
+       /* public: */
 
 /* if you move pkt_type around you also must adapt those constants */
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -642,7 +644,9 @@ struct sk_buff {
        __u16                   network_header;
        __u16                   mac_header;
 
+       /* private: */
        __u32                   headers_end[0];
+       /* public: */
 
        /* These elements must be at the end, see alloc_skb() for details.  */
        sk_buff_data_t          tail;
@@ -795,15 +799,19 @@ struct sk_buff_fclones {
  *     @skb: buffer
  *
  * Returns true is skb is a fast clone, and its clone is not freed.
+ * Some drivers call skb_orphan() in their ndo_start_xmit(),
+ * so we also check that this didn't happen.
  */
-static inline bool skb_fclone_busy(const struct sk_buff *skb)
+static inline bool skb_fclone_busy(const struct sock *sk,
+                                  const struct sk_buff *skb)
 {
        const struct sk_buff_fclones *fclones;
 
        fclones = container_of(skb, struct sk_buff_fclones, skb1);
 
        return skb->fclone == SKB_FCLONE_ORIG &&
-              fclones->skb2.fclone == SKB_FCLONE_CLONE;
+              fclones->skb2.fclone == SKB_FCLONE_CLONE &&
+              fclones->skb2.sk == sk;
 }
 
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
index 26088feb660813448ac93391bee76c1e73b57d70..d9a4905e01d0c98b88e89c7db5c85bbf7d5be8a1 100644 (file)
@@ -78,6 +78,7 @@ struct usbnet {
 #              define EVENT_NO_RUNTIME_PM      9
 #              define EVENT_RX_KILL    10
 #              define EVENT_LINK_CHANGE        11
+#              define EVENT_SET_RX_MODE        12
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -159,6 +160,9 @@ struct driver_info {
        /* called by minidriver when receiving indication */
        void    (*indication)(struct usbnet *dev, void *ind, int indlen);
 
+       /* rx mode change (device changes address list filtering) */
+       void    (*set_rx_mode)(struct usbnet *dev);
+
        /* for new devices, use the descriptor-reading code instead */
        int             in;             /* rx endpoint */
        int             out;            /* tx endpoint */
index 97f472012438b10a0bcbff9eef6c553d7357ad35..4292929392b0127479c49c5da3bb33f053f4428c 100644 (file)
@@ -671,6 +671,8 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }
 
+void ipv6_proxy_select_ident(struct sk_buff *skb);
+
 int ip6_dst_hoplimit(struct dst_entry *dst);
 
 static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
index e8427193c777b884428aa1712ff18f910ab8d24a..03e928a552290f57b050c4ce7dbe6592d02102e4 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _IPV4_NF_REJECT_H
 #define _IPV4_NF_REJECT_H
 
+#include <linux/skbuff.h>
+#include <net/ip.h>
 #include <net/icmp.h>
 
 static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
@@ -10,4 +12,12 @@ static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
 
 void nf_send_reset(struct sk_buff *oldskb, int hook);
 
+const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
+                                            struct tcphdr *_oth, int hook);
+struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
+                                 const struct sk_buff *oldskb,
+                                 __be16 protocol, int ttl);
+void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
+                            const struct tcphdr *oth);
+
 #endif /* _IPV4_NF_REJECT_H */
index 48e18810a9be6fcc6250f80db0d8febcf4e3cdf4..23216d48abf9ddb79aa188cbf55e0a032e4f9e78 100644 (file)
@@ -15,4 +15,14 @@ nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
 
 void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
 
+const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
+                                             struct tcphdr *otcph,
+                                             unsigned int *otcplen, int hook);
+struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
+                                    const struct sk_buff *oldskb,
+                                    __be16 protocol, int hoplimit);
+void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
+                             const struct sk_buff *oldskb,
+                             const struct tcphdr *oth, unsigned int otcplen);
+
 #endif /* _IPV6_NF_REJECT_H */
index 3d7292392fac91ca806b12945fd9eb190c21a031..845c596bf594c5ac297320c1a3dfe8f34cf702b4 100644 (file)
@@ -530,6 +530,9 @@ enum nft_chain_type {
        NFT_CHAIN_T_MAX
 };
 
+int nft_chain_validate_dependency(const struct nft_chain *chain,
+                                 enum nft_chain_type type);
+
 struct nft_stats {
        u64                     bytes;
        u64                     pkts;
index c72729f954f41e2168d5a095db5cc1ad39f38878..e2a518b60e19039014e1298f3618046e9e030238 100644 (file)
@@ -13,4 +13,7 @@ int nft_masq_init(const struct nft_ctx *ctx,
 
 int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr);
 
+int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                     const struct nft_data **data);
+
 #endif /* _NFT_MASQ_H_ */
index 9b56f37148cfd91d57b849f88c8b1279220aa41a..e335e7d8c6c283a807b1fc7b948109b8cf809b5c 100644 (file)
@@ -660,18 +660,18 @@ TRACE_EVENT(rcu_torture_read,
 /*
  * Tracepoint for _rcu_barrier() execution.  The string "s" describes
  * the _rcu_barrier phase:
- *     "Begin": rcu_barrier_callback() started.
- *     "Check": rcu_barrier_callback() checking for piggybacking.
- *     "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
- *     "Inc1": rcu_barrier_callback() piggyback check counter incremented.
- *     "Offline": rcu_barrier_callback() found offline CPU
- *     "OnlineNoCB": rcu_barrier_callback() found online no-CBs CPU.
- *     "OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
- *     "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
+ *     "Begin": _rcu_barrier() started.
+ *     "Check": _rcu_barrier() checking for piggybacking.
+ *     "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
+ *     "Inc1": _rcu_barrier() piggyback check counter incremented.
+ *     "OfflineNoCB": _rcu_barrier() found callback on never-online CPU
+ *     "OnlineNoCB": _rcu_barrier() found online no-CBs CPU.
+ *     "OnlineQ": _rcu_barrier() found online CPU with callbacks.
+ *     "OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
  *     "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
  *     "CB": An rcu_barrier_callback() invoked a callback, not the last.
  *     "LastCB": An rcu_barrier_callback() invoked the last callback.
- *     "Inc2": rcu_barrier_callback() piggyback check counter incremented.
+ *     "Inc2": _rcu_barrier() piggyback check counter incremented.
  * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
  * is the count of remaining callbacks, and "done" is the piggybacking count.
  */
index 1874ebe9ac1e505b0215a90f03a9b2ce57f6b2ff..a1d7e931ab72cdfb93db7f434275492d99aca77d 100644 (file)
@@ -739,6 +739,13 @@ struct input_keymap_entry {
 #define KEY_BRIGHTNESS_MIN             0x250   /* Set Brightness to Minimum */
 #define KEY_BRIGHTNESS_MAX             0x251   /* Set Brightness to Maximum */
 
+#define KEY_KBDINPUTASSIST_PREV                0x260
+#define KEY_KBDINPUTASSIST_NEXT                0x261
+#define KEY_KBDINPUTASSIST_PREVGROUP           0x262
+#define KEY_KBDINPUTASSIST_NEXTGROUP           0x263
+#define KEY_KBDINPUTASSIST_ACCEPT              0x264
+#define KEY_KBDINPUTASSIST_CANCEL              0x265
+
 #define BTN_TRIGGER_HAPPY              0x2c0
 #define BTN_TRIGGER_HAPPY1             0x2c0
 #define BTN_TRIGGER_HAPPY2             0x2c1
index 9269de254874237c71b61713ee4bc9dc7a6aabe5..9d845404d875dc28c1a69a0fe311693f04dc60d2 100644 (file)
@@ -364,7 +364,7 @@ struct perf_event_mmap_page {
        /*
         * Bits needed to read the hw events in user-space.
         *
-        *   u32 seq, time_mult, time_shift, idx, width;
+        *   u32 seq, time_mult, time_shift, index, width;
         *   u64 count, enabled, running;
         *   u64 cyc, time_offset;
         *   s64 pmc = 0;
@@ -383,11 +383,11 @@ struct perf_event_mmap_page {
         *       time_shift  = pc->time_shift;
         *     }
         *
-        *     idx = pc->index;
+        *     index = pc->index;
         *     count = pc->offset;
-        *     if (pc->cap_usr_rdpmc && idx) {
+        *     if (pc->cap_user_rdpmc && index) {
         *       width = pc->pmc_width;
-        *       pmc = rdpmc(idx - 1);
+        *       pmc = rdpmc(index - 1);
         *     }
         *
         *     barrier();
@@ -415,7 +415,7 @@ struct perf_event_mmap_page {
        };
 
        /*
-        * If cap_usr_rdpmc this field provides the bit-width of the value
+        * If cap_user_rdpmc this field provides the bit-width of the value
         * read using the rdpmc() or equivalent instruction. This can be used
         * to sign extend the result like:
         *
@@ -439,10 +439,10 @@ struct perf_event_mmap_page {
         *
         * Where time_offset,time_mult,time_shift and cyc are read in the
         * seqcount loop described above. This delta can then be added to
-        * enabled and possible running (if idx), improving the scaling:
+        * enabled and possible running (if index), improving the scaling:
         *
         *   enabled += delta;
-        *   if (idx)
+        *   if (index)
         *     running += delta;
         *
         *   quot = count / running;
index 34f9d7387d134a12923669e3516ad2892a41f84d..b932be9f5c5bd94ba2513367435ceb03d1130431 100644 (file)
@@ -13,7 +13,7 @@
 #define CLONE_VFORK    0x00004000      /* set if the parent wants the child to wake it up on mm_release */
 #define CLONE_PARENT   0x00008000      /* set if we want to have the same parent as the cloner */
 #define CLONE_THREAD   0x00010000      /* Same thread group? */
-#define CLONE_NEWNS    0x00020000      /* New namespace group? */
+#define CLONE_NEWNS    0x00020000      /* New mount namespace group */
 #define CLONE_SYSVSEM  0x00040000      /* share system V SEM_UNDO semantics */
 #define CLONE_SETTLS   0x00080000      /* create a new TLS for the child */
 #define CLONE_PARENT_SETTID    0x00100000      /* set the TID in the parent */
index 6a0764c89fcbeaf29cee9bf5dcba996ed9bc3c98..6c8f159e416eea1b2781b4e00139b4b3afe2ea9a 100644 (file)
 #ifndef _V4L2_DV_TIMINGS_H
 #define _V4L2_DV_TIMINGS_H
 
+#if __GNUC__ < 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ < 6))
+/* Sadly gcc versions older than 4.6 have a bug in how they initialize
+   anonymous unions where they require additional curly brackets.
+   This violates the C1x standard. This workaround adds the curly brackets
+   if needed. */
 #define V4L2_INIT_BT_TIMINGS(_width, args...) \
        { .bt = { _width , ## args } }
+#else
+#define V4L2_INIT_BT_TIMINGS(_width, args...) \
+       .bt = { _width , ## args }
+#endif
 
 /* CEA-861-E timings (i.e. standard HDTV timings) */
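
The V4L2_INIT_BT_TIMINGS() change above works around pre-4.6 gcc needing an extra pair of braces when an initializer targets a member of an anonymous union. A self-contained illustration of the two initializer shapes the macro can now expand to (the struct layout is made up, not the real v4l2_dv_timings):

/* illustrative only: two ways to initialize a member of an anonymous
 * union; old gcc accepted only the extra-brace form */
#include <stdio.h>

struct timings {
        unsigned type;
        union {                                 /* anonymous union */
                struct { unsigned width, height; } bt;
                unsigned reserved[4];
        };
};

int main(void)
{
        /* C11-style: designate the union member directly */
        struct timings a = { .type = 1, .bt = { 640, 480 } };

        /* workaround form: wrap the member in an extra { } pair, which is
         * roughly what the macro expands to for gcc < 4.6 */
        struct timings b = { .type = 1, { .bt = { 640, 480 } } };

        printf("%ux%u and %ux%u\n", a.bt.width, a.bt.height,
               b.bt.width, b.bt.height);
        return 0;
}
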
 
index 3ee28ae02cc89cb0c0d3b2a9ca112c1edfda2e96..2081a4d3d9171f5323a077a1f3a9805223e715be 100644 (file)
@@ -1341,6 +1341,10 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
 config HAVE_PCSPKR_PLATFORM
        bool
 
+# interpreter that classic socket filters depend on
+config BPF
+       bool
+
 menuconfig EXPERT
        bool "Configure standard kernel features (expert users)"
        # Unhide debug options, to make the on-by-default options visible
@@ -1521,6 +1525,16 @@ config EVENTFD
 
          If unsure, say Y.
 
+# syscall, maps, verifier
+config BPF_SYSCALL
+       bool "Enable bpf() system call" if EXPERT
+       select ANON_INODES
+       select BPF
+       default n
+       help
+         Enable the bpf() system call that allows to manipulate eBPF
+         programs and maps via file descriptors.
+
 config SHMEM
        bool "Use full shmem filesystem" if EXPERT
        default y
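
The new BPF_SYSCALL option gates the bpf(2) system call described in the help text above. A hedged sketch of creating a small map through that syscall; the attribute fields follow union bpf_attr, but __NR_bpf and BPF_MAP_TYPE_HASH assume headers and a kernel slightly newer than this merge window, so treat the details as illustrative:

/* minimal sketch: create an eBPF hash map via bpf(2); assumes
 * CONFIG_BPF_SYSCALL plus userspace headers that define __NR_bpf,
 * BPF_MAP_CREATE and BPF_MAP_TYPE_HASH */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int bpf_create_map(enum bpf_map_type type, unsigned int key_size,
                          unsigned int value_size, unsigned int max_entries)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = type;
        attr.key_size    = key_size;
        attr.value_size  = value_size;
        attr.max_entries = max_entries;

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

int main(void)
{
        int fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int),
                                sizeof(long long), 64);

        if (fd < 0)
                perror("bpf(BPF_MAP_CREATE)");
        else
                printf("map fd = %d\n", fd);
        return 0;
}
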
index dc5c77544fd69f6924adc25c3f4a8d3530708392..17ea6d4a9a247ce261f8ae22a3f07f0bb80393e1 100644 (file)
@@ -86,7 +86,7 @@ obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_CPU_PM) += cpu_pm.o
-obj-$(CONFIG_NET) += bpf/
+obj-$(CONFIG_BPF) += bpf/
 
 obj-$(CONFIG_PERF_EVENTS) += events/
 
index 45427239f375c564049951e00e3925aacff8bc3f..0daf7f6ae7df565c43273b8fb3a553bc1ffdd622 100644 (file)
@@ -1,5 +1,5 @@
-obj-y := core.o syscall.o verifier.o
-
+obj-y := core.o
+obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o
 ifdef CONFIG_TEST_BPF
-obj-y += test_stub.o
+obj-$(CONFIG_BPF_SYSCALL) += test_stub.o
 endif
index f0c30c59b31755ff1e124e0a5a0aaa0e356efc9e..d6594e457a25af32e41a4b46a00a43ddf0a2c46b 100644 (file)
@@ -655,3 +655,12 @@ void bpf_prog_free(struct bpf_prog *fp)
        schedule_work(&aux->work);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_free);
+
+/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
+ * skb_copy_bits(), so provide a weak definition of it for NET-less config.
+ */
+int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
+                        int len)
+{
+       return -EFAULT;
+}
index 801f5f3b9307cc5cbe8ed24f13e2aa1cbab53dc6..9f81818f294185988c9cc181274ed38789774871 100644 (file)
@@ -1409,7 +1409,8 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
                if (memcmp(&old->regs[i], &cur->regs[i],
                           sizeof(old->regs[0])) != 0) {
                        if (old->regs[i].type == NOT_INIT ||
-                           old->regs[i].type == UNKNOWN_VALUE)
+                           (old->regs[i].type == UNKNOWN_VALUE &&
+                            cur->regs[i].type != NOT_INIT))
                                continue;
                        return false;
                }
index 5664985c46a0972b6961e117e99d7a399940e1c0..937ecdfdf2589cc896522286a4da2e3d4f097bf9 100644 (file)
@@ -107,46 +107,6 @@ void context_tracking_user_enter(void)
 }
 NOKPROBE_SYMBOL(context_tracking_user_enter);
 
-#ifdef CONFIG_PREEMPT
-/**
- * preempt_schedule_context - preempt_schedule called by tracing
- *
- * The tracing infrastructure uses preempt_enable_notrace to prevent
- * recursion and tracing preempt enabling caused by the tracing
- * infrastructure itself. But as tracing can happen in areas coming
- * from userspace or just about to enter userspace, a preempt enable
- * can occur before user_exit() is called. This will cause the scheduler
- * to be called when the system is still in usermode.
- *
- * To prevent this, the preempt_enable_notrace will use this function
- * instead of preempt_schedule() to exit user context if needed before
- * calling the scheduler.
- */
-asmlinkage __visible void __sched notrace preempt_schedule_context(void)
-{
-       enum ctx_state prev_ctx;
-
-       if (likely(!preemptible()))
-               return;
-
-       /*
-        * Need to disable preemption in case user_exit() is traced
-        * and the tracer calls preempt_enable_notrace() causing
-        * an infinite recursion.
-        */
-       preempt_disable_notrace();
-       prev_ctx = exception_enter();
-       preempt_enable_no_resched_notrace();
-
-       preempt_schedule();
-
-       preempt_disable_notrace();
-       exception_exit(prev_ctx);
-       preempt_enable_notrace();
-}
-EXPORT_SYMBOL_GPL(preempt_schedule_context);
-#endif /* CONFIG_PREEMPT */
-
 /**
  * context_tracking_user_exit - Inform the context tracking that the CPU is
  *                              exiting userspace mode and entering the kernel.
index 356450f09c1f89d1ee35820a59a59e3a68cebe63..90a3d017b90ce56edee804892492bd5b62442c62 100644 (file)
@@ -64,6 +64,8 @@ static struct {
         * an ongoing cpu hotplug operation.
         */
        int refcount;
+       /* And allows lockless put_online_cpus(). */
+       atomic_t puts_pending;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
@@ -113,7 +115,11 @@ void put_online_cpus(void)
 {
        if (cpu_hotplug.active_writer == current)
                return;
-       mutex_lock(&cpu_hotplug.lock);
+       if (!mutex_trylock(&cpu_hotplug.lock)) {
+               atomic_inc(&cpu_hotplug.puts_pending);
+               cpuhp_lock_release();
+               return;
+       }
 
        if (WARN_ON(!cpu_hotplug.refcount))
                cpu_hotplug.refcount++; /* try to fix things up */
@@ -155,6 +161,12 @@ void cpu_hotplug_begin(void)
        cpuhp_lock_acquire();
        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
+               if (atomic_read(&cpu_hotplug.puts_pending)) {
+                       int delta;
+
+                       delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
+                       cpu_hotplug.refcount -= delta;
+               }
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
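
The cpu.c hunks above stop put_online_cpus() from blocking on the hotplug mutex: when the lock is contended the release is parked in the atomic puts_pending counter, and the writer later folds the pending count back into refcount while holding the lock. A standalone sketch of that deferred-decrement pattern (all names are illustrative):

/* illustrative: if the lock is busy, defer the refcount decrement into
 * an atomic counter and let the lock holder drain it later */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount;                    /* protected by lock */
static atomic_int puts_pending;         /* lockless overflow */

static void put_ref(void)
{
        if (pthread_mutex_trylock(&lock) != 0) {
                atomic_fetch_add(&puts_pending, 1);     /* defer the put */
                return;
        }
        refcount--;
        pthread_mutex_unlock(&lock);
}

static int drain_and_read(void)
{
        int val;

        pthread_mutex_lock(&lock);
        refcount -= atomic_exchange(&puts_pending, 0);  /* apply deferred puts */
        val = refcount;
        pthread_mutex_unlock(&lock);
        return val;
}

int main(void)
{
        refcount = 2;
        put_ref();
        put_ref();
        printf("refcount = %d\n", drain_and_read());    /* prints 0 */
        return 0;
}
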
index 1425d07018de50987b5373dec88bff3438787426..2b02c9fda790d667c29c304bca011901a4e30189 100644 (file)
@@ -6071,11 +6071,6 @@ static int perf_swevent_init(struct perf_event *event)
        return 0;
 }
 
-static int perf_swevent_event_idx(struct perf_event *event)
-{
-       return 0;
-}
-
 static struct pmu perf_swevent = {
        .task_ctx_nr    = perf_sw_context,
 
@@ -6085,8 +6080,6 @@ static struct pmu perf_swevent = {
        .start          = perf_swevent_start,
        .stop           = perf_swevent_stop,
        .read           = perf_swevent_read,
-
-       .event_idx      = perf_swevent_event_idx,
 };
 
 #ifdef CONFIG_EVENT_TRACING
@@ -6204,8 +6197,6 @@ static struct pmu perf_tracepoint = {
        .start          = perf_swevent_start,
        .stop           = perf_swevent_stop,
        .read           = perf_swevent_read,
-
-       .event_idx      = perf_swevent_event_idx,
 };
 
 static inline void perf_tp_register(void)
@@ -6431,8 +6422,6 @@ static struct pmu perf_cpu_clock = {
        .start          = cpu_clock_event_start,
        .stop           = cpu_clock_event_stop,
        .read           = cpu_clock_event_read,
-
-       .event_idx      = perf_swevent_event_idx,
 };
 
 /*
@@ -6511,8 +6500,6 @@ static struct pmu perf_task_clock = {
        .start          = task_clock_event_start,
        .stop           = task_clock_event_stop,
        .read           = task_clock_event_read,
-
-       .event_idx      = perf_swevent_event_idx,
 };
 
 static void perf_pmu_nop_void(struct pmu *pmu)
@@ -6542,7 +6529,7 @@ static void perf_pmu_cancel_txn(struct pmu *pmu)
 
 static int perf_event_idx_default(struct perf_event *event)
 {
-       return event->hw.idx + 1;
+       return 0;
 }
 
 /*
index 1559fb0b929650fe8951ee4b5084717d9d0b4895..9803a6600d499681848290f9eff6254e53364dbf 100644 (file)
@@ -605,11 +605,6 @@ static void hw_breakpoint_stop(struct perf_event *bp, int flags)
        bp->hw.state = PERF_HES_STOPPED;
 }
 
-static int hw_breakpoint_event_idx(struct perf_event *bp)
-{
-       return 0;
-}
-
 static struct pmu perf_breakpoint = {
        .task_ctx_nr    = perf_sw_context, /* could eventually get its own */
 
@@ -619,8 +614,6 @@ static struct pmu perf_breakpoint = {
        .start          = hw_breakpoint_start,
        .stop           = hw_breakpoint_stop,
        .read           = hw_breakpoint_pmu_read,
-
-       .event_idx      = hw_breakpoint_event_idx,
 };
 
 int __init init_hw_breakpoint(void)
index f3a3a071283cea232512370c2b9f5c87a0b440bf..63678b573d6135201700db85ede47d5111082a9c 100644 (file)
  *
  * Where (A) orders the waiters increment and the futex value read through
  * atomic operations (see hb_waiters_inc) and where (B) orders the write
- * to futex and the waiters read -- this is done by the barriers in
- * get_futex_key_refs(), through either ihold or atomic_inc, depending on the
- * futex type.
+ * to futex and the waiters read -- this is done by the barriers for both
+ * shared and private futexes in get_futex_key_refs().
  *
  * This yields the following case (where X:=waiters, Y:=futex):
  *
@@ -344,13 +343,20 @@ static void get_futex_key_refs(union futex_key *key)
                futex_get_mm(key); /* implies MB (B) */
                break;
        default:
+               /*
+                * Private futexes do not hold reference on an inode or
+                * mm, therefore the only purpose of calling get_futex_key_refs
+                * is because we need the barrier for the lockless waiter check.
+                */
                smp_mb(); /* explicit MB (B) */
        }
 }
 
 /*
  * Drop a reference to the resource addressed by a key.
- * The hash bucket spinlock must not be held.
+ * The hash bucket spinlock must not be held. This is
+ * a no-op for private futexes, see comment in the get
+ * counterpart.
  */
 static void drop_futex_key_refs(union futex_key *key)
 {
@@ -641,8 +647,14 @@ static struct futex_pi_state * alloc_pi_state(void)
        return pi_state;
 }
 
+/*
+ * Must be called with the hb lock held.
+ */
 static void free_pi_state(struct futex_pi_state *pi_state)
 {
+       if (!pi_state)
+               return;
+
        if (!atomic_dec_and_test(&pi_state->refcount))
                return;
 
@@ -1521,15 +1533,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
        }
 
 retry:
-       if (pi_state != NULL) {
-               /*
-                * We will have to lookup the pi_state again, so free this one
-                * to keep the accounting correct.
-                */
-               free_pi_state(pi_state);
-               pi_state = NULL;
-       }
-
        ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
        if (unlikely(ret != 0))
                goto out;
@@ -1619,6 +1622,8 @@ retry_private:
                case 0:
                        break;
                case -EFAULT:
+                       free_pi_state(pi_state);
+                       pi_state = NULL;
                        double_unlock_hb(hb1, hb2);
                        hb_waiters_dec(hb2);
                        put_futex_key(&key2);
@@ -1634,6 +1639,8 @@ retry_private:
                         *   exit to complete.
                         * - The user space value changed.
                         */
+                       free_pi_state(pi_state);
+                       pi_state = NULL;
                        double_unlock_hb(hb1, hb2);
                        hb_waiters_dec(hb2);
                        put_futex_key(&key2);
@@ -1710,6 +1717,7 @@ retry_private:
        }
 
 out_unlock:
+       free_pi_state(pi_state);
        double_unlock_hb(hb1, hb2);
        hb_waiters_dec(hb2);
 
@@ -1727,8 +1735,6 @@ out_put_keys:
 out_put_key1:
        put_futex_key(&key1);
 out:
-       if (pi_state != NULL)
-               free_pi_state(pi_state);
        return ret ? ret : task_count;
 }
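
The comment block added at the top of the futex changes spells out why even private futexes need the smp_mb() in get_futex_key_refs(): the waiter's "bump waiters, then read the futex word" must pair with the waker's "write the futex word, then read waiters", otherwise both sides can miss each other and a wakeup is lost. A minimal C11 sketch of that store-buffering pattern with two full fences (illustrative, not the kernel code):

/* illustrative: with both fences, the outcome "waiter missed the update
 * AND the waker saw no waiter" is forbidden */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int futex_word;
static atomic_int waiters;
static int waiter_saw_update, waker_saw_waiter;

static void *waiter(void *arg)
{
        (void)arg;
        atomic_fetch_add_explicit(&waiters, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* MB (A) */
        waiter_saw_update = atomic_load_explicit(&futex_word,
                                                 memory_order_relaxed);
        return NULL;
}

static void *waker(void *arg)
{
        (void)arg;
        atomic_store_explicit(&futex_word, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* MB (B) */
        waker_saw_waiter = atomic_load_explicit(&waiters,
                                                memory_order_relaxed);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, waiter, NULL);
        pthread_create(&b, NULL, waker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* at least one of the two values is nonzero on every run */
        printf("waiter saw update: %d, waker saw waiter: %d\n",
               waiter_saw_update, waker_saw_waiter);
        return 0;
}
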
 
index cf66c5c8458e66aef61834e977e55999b442360a..3b7408759bdfdfc4a5fe469df649d631efdae40d 100644 (file)
@@ -35,7 +35,7 @@ config GCOV_KERNEL
 config GCOV_PROFILE_ALL
        bool "Profile entire Kernel"
        depends on GCOV_KERNEL
-       depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM
+       depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM || ARM64
        default n
        ---help---
        This options activates profiling for the entire kernel.
index 8637e041a24722b7245f5a5cf24fa7dd71dce001..80f7a6d00519c863ac4e0ea94c86406532a6f65a 100644 (file)
@@ -196,12 +196,34 @@ int __request_module(bool wait, const char *fmt, ...)
 EXPORT_SYMBOL(__request_module);
 #endif /* CONFIG_MODULES */
 
+static void call_usermodehelper_freeinfo(struct subprocess_info *info)
+{
+       if (info->cleanup)
+               (*info->cleanup)(info);
+       kfree(info);
+}
+
+static void umh_complete(struct subprocess_info *sub_info)
+{
+       struct completion *comp = xchg(&sub_info->complete, NULL);
+       /*
+        * See call_usermodehelper_exec(). If xchg() returns NULL
+        * we own sub_info, the UMH_KILLABLE caller has gone away
+        * or the caller used UMH_NO_WAIT.
+        */
+       if (comp)
+               complete(comp);
+       else
+               call_usermodehelper_freeinfo(sub_info);
+}
+
 /*
  * This is the task which runs the usermode application
  */
 static int ____call_usermodehelper(void *data)
 {
        struct subprocess_info *sub_info = data;
+       int wait = sub_info->wait & ~UMH_KILLABLE;
        struct cred *new;
        int retval;
 
@@ -221,7 +243,7 @@ static int ____call_usermodehelper(void *data)
        retval = -ENOMEM;
        new = prepare_kernel_cred(current);
        if (!new)
-               goto fail;
+               goto out;
 
        spin_lock(&umh_sysctl_lock);
        new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
@@ -233,7 +255,7 @@ static int ____call_usermodehelper(void *data)
                retval = sub_info->init(sub_info, new);
                if (retval) {
                        abort_creds(new);
-                       goto fail;
+                       goto out;
                }
        }
 
@@ -242,12 +264,13 @@ static int ____call_usermodehelper(void *data)
        retval = do_execve(getname_kernel(sub_info->path),
                           (const char __user *const __user *)sub_info->argv,
                           (const char __user *const __user *)sub_info->envp);
+out:
+       sub_info->retval = retval;
+       /* wait_for_helper() will call umh_complete if UMH_WAIT_PROC. */
+       if (wait != UMH_WAIT_PROC)
+               umh_complete(sub_info);
        if (!retval)
                return 0;
-
-       /* Exec failed? */
-fail:
-       sub_info->retval = retval;
        do_exit(0);
 }
 
@@ -258,26 +281,6 @@ static int call_helper(void *data)
        return ____call_usermodehelper(data);
 }
 
-static void call_usermodehelper_freeinfo(struct subprocess_info *info)
-{
-       if (info->cleanup)
-               (*info->cleanup)(info);
-       kfree(info);
-}
-
-static void umh_complete(struct subprocess_info *sub_info)
-{
-       struct completion *comp = xchg(&sub_info->complete, NULL);
-       /*
-        * See call_usermodehelper_exec(). If xchg() returns NULL
-        * we own sub_info, the UMH_KILLABLE caller has gone away.
-        */
-       if (comp)
-               complete(comp);
-       else
-               call_usermodehelper_freeinfo(sub_info);
-}
-
 /* Keventd can't block, but this (a child) can. */
 static int wait_for_helper(void *data)
 {
@@ -336,18 +339,8 @@ static void __call_usermodehelper(struct work_struct *work)
                kmod_thread_locker = NULL;
        }
 
-       switch (wait) {
-       case UMH_NO_WAIT:
-               call_usermodehelper_freeinfo(sub_info);
-               break;
-
-       case UMH_WAIT_PROC:
-               if (pid > 0)
-                       break;
-               /* FALLTHROUGH */
-       case UMH_WAIT_EXEC:
-               if (pid < 0)
-                       sub_info->retval = pid;
+       if (pid < 0) {
+               sub_info->retval = pid;
                umh_complete(sub_info);
        }
 }
@@ -588,7 +581,12 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
                goto out;
        }
 
-       sub_info->complete = &done;
+       /*
+        * Set the completion pointer only if there is a waiter.
+        * This makes it possible to use umh_complete to free
+        * the data structure in case of UMH_NO_WAIT.
+        */
+       sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
        sub_info->wait = wait;
 
        queue_work(khelper_wq, &sub_info->work);
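The reshuffled helpers above hinge on one idea: umh_complete() atomically swaps the completion pointer to NULL, and whichever side ends up holding the non-NULL value is responsible either for waking the waiter or, when there is no waiter (UMH_NO_WAIT, or a killed caller that has gone away), for freeing the subprocess_info itself. A rough user-space analogue of that hand-off, as a hedged sketch with invented names and C11 atomics standing in for the kernel's xchg():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct job {
	_Atomic(bool *) waiter;	/* set if someone will wait for the result */
	int retval;
};

static void job_free(struct job *j)
{
	printf("completer freed the job\n");
	free(j);
}

/* Worker side: whoever swaps the pointer to NULL decides the cleanup.
 * Non-NULL means a waiter exists and will free the job after being told;
 * NULL means nobody is waiting, so the completer frees it here. */
static void job_complete(struct job *j, int retval)
{
	bool *flag;

	j->retval = retval;
	flag = atomic_exchange(&j->waiter, NULL);
	if (flag)
		*flag = true;
	else
		job_free(j);
}

int main(void)
{
	/* "no wait" case: pointer starts out NULL, completer owns the free */
	struct job *a = calloc(1, sizeof(*a));
	atomic_store(&a->waiter, NULL);
	job_complete(a, 0);

	/* "wait" case: a flag is installed, the waiter frees after completion */
	bool done = false;
	struct job *b = calloc(1, sizeof(*b));
	atomic_store(&b->waiter, &done);
	job_complete(b, 0);
	if (done) {
		printf("waiter saw retval %d and frees the job\n", b->retval);
		free(b);
	}
	return 0;
}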
index 133e47223095d76fd1203f949a955009d1075688..9815447d22e0354d5304b0ef8394577f09414602 100644 (file)
@@ -3299,11 +3299,16 @@ static void _rcu_barrier(struct rcu_state *rsp)
                        continue;
                rdp = per_cpu_ptr(rsp->rda, cpu);
                if (rcu_is_nocb_cpu(cpu)) {
-                       _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
-                                          rsp->n_barrier_done);
-                       atomic_inc(&rsp->barrier_cpu_count);
-                       __call_rcu(&rdp->barrier_head, rcu_barrier_callback,
-                                  rsp, cpu, 0);
+                       if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
+                               _rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
+                                                  rsp->n_barrier_done);
+                       } else {
+                               _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
+                                                  rsp->n_barrier_done);
+                               atomic_inc(&rsp->barrier_cpu_count);
+                               __call_rcu(&rdp->barrier_head,
+                                          rcu_barrier_callback, rsp, cpu, 0);
+                       }
                } else if (ACCESS_ONCE(rdp->qlen)) {
                        _rcu_barrier_trace(rsp, "OnlineQ", cpu,
                                           rsp->n_barrier_done);
index d03764652d910cb6b8e7f4039ee184ff549187eb..bbdc45d8d74f46a8812cc79cec72ca34cff835d3 100644 (file)
@@ -587,6 +587,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
 static void print_cpu_stall_info_end(void);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static void increment_cpu_stall_ticks(void);
+static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
 static void rcu_init_one_nocb(struct rcu_node *rnp);
index 387dd45993449f7a54310fd936693bbc16611f5b..c1d7f27bd38f727419e6628097660b492c874321 100644 (file)
@@ -2049,6 +2049,33 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
        }
 }
 
+/*
+ * Does the specified CPU need an RCU callback for the specified flavor
+ * of rcu_barrier()?
+ */
+static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+       struct rcu_head *rhp;
+
+       /* No-CBs CPUs might have callbacks on any of three lists. */
+       rhp = ACCESS_ONCE(rdp->nocb_head);
+       if (!rhp)
+               rhp = ACCESS_ONCE(rdp->nocb_gp_head);
+       if (!rhp)
+               rhp = ACCESS_ONCE(rdp->nocb_follower_head);
+
+       /* Having no rcuo kthread but CBs after scheduler starts is bad! */
+       if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp) {
+               /* RCU callback enqueued before CPU first came online??? */
+               pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
+                      cpu, rhp->func);
+               WARN_ON_ONCE(1);
+       }
+
+       return !!rhp;
+}
+
 /*
  * Enqueue the specified string of rcu_head structures onto the specified
  * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
@@ -2642,6 +2669,12 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
+static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+{
+       WARN_ON_ONCE(1); /* Should be dead code. */
+       return false;
+}
+
 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 }
index 44999505e1bf6894bdcdb48531a49e5a07b88ac5..240157c13ddc88508a4aafa87d8350394b38e94b 100644 (file)
@@ -2951,6 +2951,47 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
 }
 NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
+
+#ifdef CONFIG_CONTEXT_TRACKING
+/**
+ * preempt_schedule_context - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+asmlinkage __visible void __sched notrace preempt_schedule_context(void)
+{
+       enum ctx_state prev_ctx;
+
+       if (likely(!preemptible()))
+               return;
+
+       do {
+               __preempt_count_add(PREEMPT_ACTIVE);
+               /*
+                * Needs preempt disabled in case user_exit() is traced
+                * and the tracer calls preempt_enable_notrace() causing
+                * an infinite recursion.
+                */
+               prev_ctx = exception_enter();
+               __schedule();
+               exception_exit(prev_ctx);
+
+               __preempt_count_sub(PREEMPT_ACTIVE);
+               barrier();
+       } while (need_resched());
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_context);
+#endif /* CONFIG_CONTEXT_TRACKING */
+
 #endif /* CONFIG_PREEMPT */
 
 /*
@@ -7833,6 +7874,11 @@ static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
        sched_offline_group(tg);
 }
 
+static void cpu_cgroup_fork(struct task_struct *task)
+{
+       sched_move_task(task);
+}
+
 static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
                                 struct cgroup_taskset *tset)
 {
@@ -8205,6 +8251,7 @@ struct cgroup_subsys cpu_cgrp_subsys = {
        .css_free       = cpu_cgroup_css_free,
        .css_online     = cpu_cgroup_css_online,
        .css_offline    = cpu_cgroup_css_offline,
+       .fork           = cpu_cgroup_fork,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .exit           = cpu_cgroup_exit,
index 256e577faf1bf977ae00a97f742103a826714f93..5285332392d5b599788a90faa37615ccddc386d5 100644 (file)
@@ -518,12 +518,20 @@ again:
        }
 
        /*
-        * We need to take care of a possible races here. In fact, the
-        * task might have changed its scheduling policy to something
-        * different from SCHED_DEADLINE or changed its reservation
-        * parameters (through sched_setattr()).
+        * We need to take care of several possible races here:
+        *
+        *   - the task might have changed its scheduling policy
+        *     to something different than SCHED_DEADLINE
+        *   - the task might have changed its reservation parameters
+        *     (through sched_setattr())
+        *   - the task might have been boosted by someone else and
+        *     might be in the boosting/deboosting path
+        *
+        * In all these cases we bail out, as the task is already
+        * in the runqueue or is going to be enqueued back anyway.
         */
-       if (!dl_task(p) || dl_se->dl_new)
+       if (!dl_task(p) || dl_se->dl_new ||
+           dl_se->dl_boosted || !dl_se->dl_throttled)
                goto unlock;
 
        sched_clock_tick();
@@ -532,7 +540,7 @@ again:
        dl_se->dl_yielded = 0;
        if (task_on_rq_queued(p)) {
                enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
-               if (task_has_dl_policy(rq->curr))
+               if (dl_task(rq->curr))
                        check_preempt_curr_dl(rq, p, 0);
                else
                        resched_curr(rq);
@@ -847,8 +855,19 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
         * smaller than our one... OTW we keep our runtime and
         * deadline.
         */
-       if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
+       if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
                pi_se = &pi_task->dl;
+       } else if (!dl_prio(p->normal_prio)) {
+               /*
+                * Special case in which we have a !SCHED_DEADLINE task
+                * that is going to be deboosted, but exceeds its
+                * runtime while doing so. No point in replenishing
+                * it, as it's going to return to its original
+                * scheduling class after this.
+                */
+               BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
+               return;
+       }
 
        /*
         * If p is throttled, we do nothing. In fact, if it exhausted
@@ -1607,8 +1626,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
                        /* Only reschedule if pushing failed */
                        check_resched = 0;
 #endif /* CONFIG_SMP */
-               if (check_resched && task_has_dl_policy(rq->curr))
-                       check_preempt_curr_dl(rq, p, 0);
+               if (check_resched) {
+                       if (dl_task(rq->curr))
+                               check_preempt_curr_dl(rq, p, 0);
+                       else
+                               resched_curr(rq);
+               }
        }
 }
 
index 0b069bf3e708e82ea0e08c5b204f931ca1d22a00..34baa60f8a7bd11f03ccb1f754a08eb1029c46be 100644 (file)
@@ -828,11 +828,12 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 
 static unsigned int task_scan_min(struct task_struct *p)
 {
+       unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
        unsigned int scan, floor;
        unsigned int windows = 1;
 
-       if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
-               windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
+       if (scan_size < MAX_SCAN_WINDOW)
+               windows = MAX_SCAN_WINDOW / scan_size;
        floor = 1000 / windows;
 
        scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
@@ -1164,9 +1165,19 @@ static void task_numa_compare(struct task_numa_env *env,
        long moveimp = imp;
 
        rcu_read_lock();
-       cur = ACCESS_ONCE(dst_rq->curr);
-       if (cur->pid == 0) /* idle */
+
+       raw_spin_lock_irq(&dst_rq->lock);
+       cur = dst_rq->curr;
+       /*
+        * No need to move the exiting task, and this ensures that ->curr
+        * wasn't reaped and thus get_task_struct() in task_numa_assign()
+        * is safe under RCU read lock.
+        * Note that rcu_read_lock() itself can't protect from the final
+        * put_task_struct() after the last schedule().
+        */
+       if ((cur->flags & PF_EXITING) || is_idle_task(cur))
                cur = NULL;
+       raw_spin_unlock_irq(&dst_rq->lock);
 
        /*
         * "imp" is the fault differential for the source task between the
@@ -1520,7 +1531,7 @@ static void update_task_scan_period(struct task_struct *p,
                 * scanning faster if shared accesses dominate as it may
                 * simply bounce migrations uselessly
                 */
-               ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
+               ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
                diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
        }
 
index 4aada6d9fe7445f53e88702fa5e983dc06abd367..15f2511a1b7c6e04a48f99c1b6a402f008607ba3 100644 (file)
@@ -387,7 +387,8 @@ static struct ctl_table kern_table[] = {
                .data           = &sysctl_numa_balancing_scan_size,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
        },
        {
                .procname       = "numa_balancing",
index 9c94c19f13052fb45391584cf14216f2eb494825..55449909f11475372135ac61b33e65114eb151ba 100644 (file)
@@ -72,7 +72,7 @@ static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
         * Also omit the add if it would overflow the u64 boundary.
         */
        if ((~0ULL - clc > rnd) &&
-           (!ismax || evt->mult <= (1U << evt->shift)))
+           (!ismax || evt->mult <= (1ULL << evt->shift)))
                clc += rnd;
 
        do_div(clc, evt->mult);
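The widened literal matters because 1U << evt->shift is evaluated in 32 bits: once the shift reaches the width of unsigned int, the result is undefined, so the mult comparison could go either way. With 1ULL both the shift and the comparison are done in 64 bits and stay well defined for any shift up to 63. A tiny standalone illustration (values invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int shift = 32;

	/* 64-bit literal: well defined for any shift value up to 63. */
	uint64_t wide = 1ULL << shift;
	printf("1ULL << %u = %llu\n", shift, (unsigned long long)wide);

	/* 1U << 32 would be undefined behaviour (shift >= width of the
	 * promoted type), which is exactly what the 1U -> 1ULL change
	 * above avoids, so it is not evaluated here. */
	return 0;
}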
index 42b463ad90f299707ab66fef2810e179db649cd4..31ea01f42e1f088786a291199cc54e9bde4658c9 100644 (file)
@@ -636,6 +636,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
                        goto out;
                }
        } else {
+               memset(&event.sigev_value, 0, sizeof(event.sigev_value));
                event.sigev_notify = SIGEV_SIGNAL;
                event.sigev_signo = SIGALRM;
                event.sigev_value.sival_int = new_timer->it_id;
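The added memset() closes an information leak: sigev_value is a union, and assigning only sival_int leaves the remaining bytes holding stale kernel stack data that can later reach user space. The same precaution in a self-contained sketch (made-up struct, not the kernel's sigevent):

#include <stdio.h>
#include <string.h>

union value {
	int  ival;
	void *ptr;	/* on 64-bit, wider than ival, so ival leaves a tail */
};

struct event {
	int type;
	union value val;
};

/* Fill an event for the "int" case.  Without the memset, the bytes of
 * val beyond ival would keep whatever happened to be on the stack. */
static void init_event(struct event *ev, int id)
{
	memset(&ev->val, 0, sizeof(ev->val));
	ev->type = 1;
	ev->val.ival = id;
}

int main(void)
{
	struct event ev;

	init_event(&ev, 42);
	printf("%d %d\n", ev.type, ev.val.ival);
	return 0;
}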
index fb186b9ddf519159d866fcaa8fc187328820f2bf..31c90fec415899bfb40927bbbcc8ad29657317ab 100644 (file)
@@ -1925,8 +1925,16 @@ ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
         * when we are adding another op to the rec or removing the
         * current one. Thus, if the op is being added, we can
         * ignore it because it hasn't attached itself to the rec
-        * yet. That means we just need to find the op that has a
-        * trampoline and is not beeing added.
+        * yet.
+        *
+        * If an ops is being modified (hooking to different functions)
+        * then we don't care about the new functions that are being
+        * added, just the old ones (that are probably being removed).
+        *
+        * If we are adding an ops to a function that is already using
+        * a trampoline, that trampoline needs to be removed (trampolines
+        * are only used when a single ops is connected), so an ops that
+        * is not being modified also needs to be checked.
         */
        do_for_each_ftrace_op(op, ftrace_ops_list) {
 
@@ -1940,17 +1948,23 @@ ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
                if (op->flags & FTRACE_OPS_FL_ADDING)
                        continue;
 
+
                /*
-                * If the ops is not being added and has a trampoline,
-                * then it must be the one that we want!
+                * If the ops is being modified and is in the old
+                * hash, then it is probably being removed from this
+                * function.
                 */
-               if (hash_contains_ip(ip, op->func_hash))
-                       return op;
-
-               /* If the ops is being modified, it may be in the old hash. */
                if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
                    hash_contains_ip(ip, &op->old_hash))
                        return op;
+               /*
+                * If the ops is not being added or modified, and it's
+                * in its normal filter hash, then this must be the one
+                * we want!
+                */
+               if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
+                   hash_contains_ip(ip, op->func_hash))
+                       return op;
 
        } while_for_each_ftrace_op(op);
 
@@ -2293,10 +2307,13 @@ static void ftrace_run_update_code(int command)
        FTRACE_WARN_ON(ret);
 }
 
-static void ftrace_run_modify_code(struct ftrace_ops *ops, int command)
+static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
+                                  struct ftrace_hash *old_hash)
 {
        ops->flags |= FTRACE_OPS_FL_MODIFYING;
+       ops->old_hash.filter_hash = old_hash;
        ftrace_run_update_code(command);
+       ops->old_hash.filter_hash = NULL;
        ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
 }
 
@@ -3340,7 +3357,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 
 static int ftrace_probe_registered;
 
-static void __enable_ftrace_function_probe(void)
+static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
 {
        int ret;
        int i;
@@ -3348,7 +3365,8 @@ static void __enable_ftrace_function_probe(void)
        if (ftrace_probe_registered) {
                /* still need to update the function call sites */
                if (ftrace_enabled)
-                       ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS);
+                       ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
+                                              old_hash);
                return;
        }
 
@@ -3477,13 +3495,14 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        } while_for_each_ftrace_rec();
 
        ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+
+       __enable_ftrace_function_probe(old_hash);
+
        if (!ret)
                free_ftrace_hash_rcu(old_hash);
        else
                count = ret;
 
-       __enable_ftrace_function_probe();
-
  out_unlock:
        mutex_unlock(&ftrace_lock);
  out:
@@ -3764,10 +3783,11 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
        return add_hash_entry(hash, ip);
 }
 
-static void ftrace_ops_update_code(struct ftrace_ops *ops)
+static void ftrace_ops_update_code(struct ftrace_ops *ops,
+                                  struct ftrace_hash *old_hash)
 {
        if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
-               ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS);
+               ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
 }
 
 static int
@@ -3813,7 +3833,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        old_hash = *orig_hash;
        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
        if (!ret) {
-               ftrace_ops_update_code(ops);
+               ftrace_ops_update_code(ops, old_hash);
                free_ftrace_hash_rcu(old_hash);
        }
        mutex_unlock(&ftrace_lock);
@@ -4058,7 +4078,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
                ret = ftrace_hash_move(iter->ops, filter_hash,
                                       orig_hash, iter->hash);
                if (!ret) {
-                       ftrace_ops_update_code(iter->ops);
+                       ftrace_ops_update_code(iter->ops, old_hash);
                        free_ftrace_hash_rcu(old_hash);
                }
                mutex_unlock(&ftrace_lock);
index 4dc8b79c5f75d214919220bdc45232fe635e6af0..29228c4d569692ea4234d880d13e94233b3e32f4 100644 (file)
@@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
        int size;
 
        syscall_nr = trace_get_syscall_nr(current, regs);
-       if (syscall_nr < 0)
+       if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
 
        /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
@@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
        int syscall_nr;
 
        syscall_nr = trace_get_syscall_nr(current, regs);
-       if (syscall_nr < 0)
+       if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
 
        /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
@@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
        int size;
 
        syscall_nr = trace_get_syscall_nr(current, regs);
-       if (syscall_nr < 0)
+       if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
        if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
                return;
@@ -641,7 +641,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
        int size;
 
        syscall_nr = trace_get_syscall_nr(current, regs);
-       if (syscall_nr < 0)
+       if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
        if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
                return;
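All four handlers use syscall_nr to index per-syscall bitmaps and metadata tables, so the guard has to reject both negative values and anything at or beyond NR_syscalls before the lookup. The generic shape of that check, as a standalone sketch with an invented table:

#include <stdio.h>

#define NR_ENTRIES 8

static const char *names[NR_ENTRIES] = {
	"read", "write", "open", "close", "stat", "fstat", "lseek", "mmap",
};

static const char *lookup(int nr)
{
	/* Reject both ends of the range before touching the table. */
	if (nr < 0 || nr >= NR_ENTRIES)
		return NULL;
	return names[nr];
}

int main(void)
{
	printf("%s\n", lookup(2)  ? lookup(2)  : "(out of range)");
	printf("%s\n", lookup(99) ? lookup(99) : "(out of range)");
	return 0;
}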
index cd250a2e14cb1381d256ef0590b63bfd653cb83d..b499ab6ada29a0d4db2e4692083c2d21441a42ee 100644 (file)
@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
                lower = src[off + k];
                if (left && off + k == lim - 1)
                        lower &= mask;
-               dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
+               dst[k] = lower >> rem;
+               if (rem)
+                       dst[k] |= upper << (BITS_PER_LONG - rem);
                if (left && k == lim - 1)
                        dst[k] &= mask;
        }
@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
                upper = src[k];
                if (left && k == lim - 1)
                        upper &= (1UL << left) - 1;
-               dst[k + off] = lower  >> (BITS_PER_LONG - rem) | upper << rem;
+               dst[k + off] = upper << rem;
+               if (rem)
+                       dst[k + off] |= lower >> (BITS_PER_LONG - rem);
                if (left && k + off == lim - 1)
                        dst[k + off] &= (1UL << left) - 1;
        }
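Both hunks fix the same problem: when the requested shift is an exact multiple of BITS_PER_LONG, rem is 0 and the old code shifted a word by BITS_PER_LONG, which C leaves undefined. The rewritten form only folds in the neighbouring word when rem is non-zero. The word-combining step in isolation, as a small sketch:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Combine two adjacent words of a bitmap being shifted right by 'rem'
 * bits (rem < BITS_PER_LONG).  When rem == 0, 'upper' must not be
 * folded in at all: shifting by BITS_PER_LONG would be undefined. */
static unsigned long shift_right_word(unsigned long lower, unsigned long upper,
				      unsigned int rem)
{
	unsigned long dst = lower >> rem;

	if (rem)
		dst |= upper << (BITS_PER_LONG - rem);
	return dst;
}

int main(void)
{
	printf("%lx\n", shift_right_word(0xf0f0f0f0UL, 0x1UL, 4));
	printf("%lx\n", shift_right_word(0xf0f0f0f0UL, 0x1UL, 0));
	return 0;
}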
index 9cdf62f8accdeaf777e8661ba51e3c08617ad06e..c9f2e8c6ccc996c8a40bac6872749185170ec7f9 100644 (file)
@@ -203,10 +203,10 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
                }
 
                table->orig_nents -= sg_size;
-               if (!skip_first_chunk) {
-                       free_fn(sgl, alloc_size);
+               if (skip_first_chunk)
                        skip_first_chunk = false;
-               }
+               else
+                       free_fn(sgl, alloc_size);
                sgl = next;
        }
 
index b3cbe19f71b5fe48ca853b55b8d5c1163bfdea03..fcad8322ef36781c5c59e92ddee26caeb4c3c5f2 100644 (file)
@@ -68,11 +68,13 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
                 * to be released by the balloon driver.
                 */
                if (trylock_page(page)) {
+#ifdef CONFIG_BALLOON_COMPACTION
                        if (!PagePrivate(page)) {
                                /* raced with isolation */
                                unlock_page(page);
                                continue;
                        }
+#endif
                        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
                        balloon_page_delete(page);
                        __count_vm_event(BALLOON_DEFLATE);
index edba18aed1738c752793f63ae409c22ae373342a..ec74cf0123efd3944894cc0b159b385d6b837f25 100644 (file)
@@ -784,6 +784,9 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
                        cc->nr_migratepages = 0;
                        break;
                }
+
+               if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+                       break;
        }
        acct_isolated(cc->zone, cc);
 
index 74c78aa8bc2fa68454928b09f34a7b97f3419e05..de984159cf0b8a0be6a73b9f421e9a0b4f75c599 100644 (file)
@@ -200,7 +200,7 @@ retry:
        preempt_disable();
        if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
                preempt_enable();
-               __free_page(zero_page);
+               __free_pages(zero_page, compound_order(zero_page));
                goto retry;
        }
 
@@ -232,7 +232,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
        if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
                struct page *zero_page = xchg(&huge_zero_page, NULL);
                BUG_ON(zero_page == NULL);
-               __free_page(zero_page);
+               __free_pages(zero_page, compound_order(zero_page));
                return HPAGE_PMD_NR;
        }
 
@@ -803,7 +803,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                return VM_FAULT_FALLBACK;
        if (unlikely(anon_vma_prepare(vma)))
                return VM_FAULT_OOM;
-       if (unlikely(khugepaged_enter(vma)))
+       if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
                return VM_FAULT_OOM;
        if (!(flags & FAULT_FLAG_WRITE) &&
                        transparent_hugepage_use_zero_page()) {
@@ -1970,7 +1970,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
                 * register it here without waiting a page fault that
                 * may not happen any time soon.
                 */
-               if (unlikely(khugepaged_enter_vma_merge(vma)))
+               if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
                        return -ENOMEM;
                break;
        case MADV_NOHUGEPAGE:
@@ -2071,7 +2071,8 @@ int __khugepaged_enter(struct mm_struct *mm)
        return 0;
 }
 
-int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+                              unsigned long vm_flags)
 {
        unsigned long hstart, hend;
        if (!vma->anon_vma)
@@ -2083,11 +2084,11 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
        if (vma->vm_ops)
                /* khugepaged not yet working on file or special mappings */
                return 0;
-       VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
+       VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (hstart < hend)
-               return khugepaged_enter(vma);
+               return khugepaged_enter(vma, vm_flags);
        return 0;
 }
 
index 23976fd885fd588a7687bf5631f98ea38f2235b4..d6ac0e33e150339cba37ecb3db98be3fecd3702a 100644 (file)
@@ -1536,12 +1536,8 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg)
  *         start move here.
  */
 
-/* for quick checking without looking up memcg */
-atomic_t memcg_moving __read_mostly;
-
 static void mem_cgroup_start_move(struct mem_cgroup *memcg)
 {
-       atomic_inc(&memcg_moving);
        atomic_inc(&memcg->moving_account);
        synchronize_rcu();
 }
@@ -1552,10 +1548,8 @@ static void mem_cgroup_end_move(struct mem_cgroup *memcg)
         * Now, mem_cgroup_clear_mc() may call this function with NULL.
         * We check NULL in callee rather than caller.
         */
-       if (memcg) {
-               atomic_dec(&memcg_moving);
+       if (memcg)
                atomic_dec(&memcg->moving_account);
-       }
 }
 
 /*
@@ -2204,41 +2198,52 @@ cleanup:
        return true;
 }
 
-/*
- * Used to update mapped file or writeback or other statistics.
+/**
+ * mem_cgroup_begin_page_stat - begin a page state statistics transaction
+ * @page: page that is going to change accounted state
+ * @locked: &memcg->move_lock slowpath was taken
+ * @flags: IRQ-state flags for &memcg->move_lock
  *
- * Notes: Race condition
+ * This function must mark the beginning of an accounted page state
+ * change to prevent double accounting when the page is concurrently
+ * being moved to another memcg:
  *
- * Charging occurs during page instantiation, while the page is
- * unmapped and locked in page migration, or while the page table is
- * locked in THP migration.  No race is possible.
+ *   memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+ *   if (TestClearPageState(page))
+ *     mem_cgroup_update_page_stat(memcg, state, -1);
+ *   mem_cgroup_end_page_stat(memcg, locked, flags);
  *
- * Uncharge happens to pages with zero references, no race possible.
+ * The RCU lock is held throughout the transaction.  The fast path can
+ * get away without acquiring the memcg->move_lock (@locked is false)
+ * because page moving starts with an RCU grace period.
  *
- * Charge moving between groups is protected by checking mm->moving
- * account and taking the move_lock in the slowpath.
+ * The RCU lock also protects the memcg from being freed when the page
+ * state that is going to change is the only thing preventing the page
+ * from being uncharged.  E.g. end-writeback clearing PageWriteback(),
+ * which allows migration to go ahead and uncharge the page before the
+ * account transaction might be complete.
  */
-
-void __mem_cgroup_begin_update_page_stat(struct page *page,
-                               bool *locked, unsigned long *flags)
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
+                                             bool *locked,
+                                             unsigned long *flags)
 {
        struct mem_cgroup *memcg;
        struct page_cgroup *pc;
 
+       rcu_read_lock();
+
+       if (mem_cgroup_disabled())
+               return NULL;
+
        pc = lookup_page_cgroup(page);
 again:
        memcg = pc->mem_cgroup;
        if (unlikely(!memcg || !PageCgroupUsed(pc)))
-               return;
-       /*
-        * If this memory cgroup is not under account moving, we don't
-        * need to take move_lock_mem_cgroup(). Because we already hold
-        * rcu_read_lock(), any calls to move_account will be delayed until
-        * rcu_read_unlock().
-        */
-       VM_BUG_ON(!rcu_read_lock_held());
+               return NULL;
+
+       *locked = false;
        if (atomic_read(&memcg->moving_account) <= 0)
-               return;
+               return memcg;
 
        move_lock_mem_cgroup(memcg, flags);
        if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
@@ -2246,36 +2251,40 @@ again:
                goto again;
        }
        *locked = true;
+
+       return memcg;
 }
 
-void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
+/**
+ * mem_cgroup_end_page_stat - finish a page state statistics transaction
+ * @memcg: the memcg that was accounted against
+ * @locked: value received from mem_cgroup_begin_page_stat()
+ * @flags: value received from mem_cgroup_begin_page_stat()
+ */
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
+                             unsigned long flags)
 {
-       struct page_cgroup *pc = lookup_page_cgroup(page);
+       if (memcg && locked)
+               move_unlock_mem_cgroup(memcg, &flags);
 
-       /*
-        * It's guaranteed that pc->mem_cgroup never changes while
-        * lock is held because a routine modifies pc->mem_cgroup
-        * should take move_lock_mem_cgroup().
-        */
-       move_unlock_mem_cgroup(pc->mem_cgroup, flags);
+       rcu_read_unlock();
 }
 
-void mem_cgroup_update_page_stat(struct page *page,
+/**
+ * mem_cgroup_update_page_stat - update page state statistics
+ * @memcg: memcg to account against
+ * @idx: page state item to account
+ * @val: number of pages (positive or negative)
+ *
+ * See mem_cgroup_begin_page_stat() for locking requirements.
+ */
+void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
                                 enum mem_cgroup_stat_index idx, int val)
 {
-       struct mem_cgroup *memcg;
-       struct page_cgroup *pc = lookup_page_cgroup(page);
-       unsigned long uninitialized_var(flags);
-
-       if (mem_cgroup_disabled())
-               return;
-
        VM_BUG_ON(!rcu_read_lock_held());
-       memcg = pc->mem_cgroup;
-       if (unlikely(!memcg || !PageCgroupUsed(pc)))
-               return;
 
-       this_cpu_add(memcg->stat->count[idx], val);
+       if (memcg)
+               this_cpu_add(memcg->stat->count[idx], val);
 }
 
 /*
index 1cc6bfbd872ee17122b6e85859662696f595a363..3e503831e042a6aa7b96d2608ecb570dfffa0aa7 100644 (file)
@@ -1147,6 +1147,7 @@ again:
                                print_bad_pte(vma, addr, ptent, page);
                        if (unlikely(!__tlb_remove_page(tlb, page))) {
                                force_flush = 1;
+                               addr += PAGE_SIZE;
                                break;
                        }
                        continue;
index 29d8693d0c61cf663ee45551aaecff29ff7237cf..252e1dbbed86e9a81011ac8d135d9580969a1141 100644 (file)
@@ -1912,7 +1912,6 @@ void try_offline_node(int nid)
        unsigned long start_pfn = pgdat->node_start_pfn;
        unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
        unsigned long pfn;
-       struct page *pgdat_page = virt_to_page(pgdat);
        int i;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
@@ -1941,10 +1940,6 @@ void try_offline_node(int nid)
        node_set_offline(nid);
        unregister_one_node(nid);
 
-       if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page))
-               /* node data is allocated from boot memory */
-               return;
-
        /* free waittable in each zone */
        for (i = 0; i < MAX_NR_ZONES; i++) {
                struct zone *zone = pgdat->node_zones + i;
index 7f855206e7fb2bb1f9a30fcf7745bff7a2ae3adb..87e82b38453c2cbca83f1dd7ad472c02b6a73b77 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1080,7 +1080,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                end, prev->vm_pgoff, NULL);
                if (err)
                        return NULL;
-               khugepaged_enter_vma_merge(prev);
+               khugepaged_enter_vma_merge(prev, vm_flags);
                return prev;
        }
 
@@ -1099,7 +1099,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                next->vm_pgoff - pglen, NULL);
                if (err)
                        return NULL;
-               khugepaged_enter_vma_merge(area);
+               khugepaged_enter_vma_merge(area, vm_flags);
                return area;
        }
 
@@ -2208,7 +2208,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                }
        }
        vma_unlock_anon_vma(vma);
-       khugepaged_enter_vma_merge(vma);
+       khugepaged_enter_vma_merge(vma, vma->vm_flags);
        validate_mm(vma->vm_mm);
        return error;
 }
@@ -2277,7 +2277,7 @@ int expand_downwards(struct vm_area_struct *vma,
                }
        }
        vma_unlock_anon_vma(vma);
-       khugepaged_enter_vma_merge(vma);
+       khugepaged_enter_vma_merge(vma, vma->vm_flags);
        validate_mm(vma->vm_mm);
        return error;
 }
index ff24c9d83112ece05dab0d3b5c941a92252deef0..19ceae87522d9a87ea23457e4ae9eec50882b194 100644 (file)
@@ -2115,23 +2115,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 }
 EXPORT_SYMBOL(account_page_dirtied);
 
-/*
- * Helper function for set_page_writeback family.
- *
- * The caller must hold mem_cgroup_begin/end_update_page_stat() lock
- * while calling this function.
- * See test_set_page_writeback for example.
- *
- * NOTE: Unlike account_page_dirtied this does not rely on being atomic
- * wrt interrupts.
- */
-void account_page_writeback(struct page *page)
-{
-       mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
-       inc_zone_page_state(page, NR_WRITEBACK);
-}
-EXPORT_SYMBOL(account_page_writeback);
-
 /*
  * For address_spaces which do not use buffers.  Just tag the page as dirty in
  * its radix tree.
@@ -2344,11 +2327,12 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
 int test_clear_page_writeback(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
-       int ret;
-       bool locked;
        unsigned long memcg_flags;
+       struct mem_cgroup *memcg;
+       bool locked;
+       int ret;
 
-       mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
+       memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
        if (mapping) {
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;
@@ -2369,22 +2353,23 @@ int test_clear_page_writeback(struct page *page)
                ret = TestClearPageWriteback(page);
        }
        if (ret) {
-               mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
+               mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
                dec_zone_page_state(page, NR_WRITEBACK);
                inc_zone_page_state(page, NR_WRITTEN);
        }
-       mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
+       mem_cgroup_end_page_stat(memcg, locked, memcg_flags);
        return ret;
 }
 
 int __test_set_page_writeback(struct page *page, bool keep_write)
 {
        struct address_space *mapping = page_mapping(page);
-       int ret;
-       bool locked;
        unsigned long memcg_flags;
+       struct mem_cgroup *memcg;
+       bool locked;
+       int ret;
 
-       mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
+       memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
        if (mapping) {
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;
@@ -2410,9 +2395,11 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
        } else {
                ret = TestSetPageWriteback(page);
        }
-       if (!ret)
-               account_page_writeback(page);
-       mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
+       if (!ret) {
+               mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+               inc_zone_page_state(page, NR_WRITEBACK);
+       }
+       mem_cgroup_end_page_stat(memcg, locked, memcg_flags);
        return ret;
 
 }
index 3708264d28332d4a8ec17bd929110a5b212f5529..5331c2bd85a2cc5c838881cdd12870ad9502029d 100644 (file)
@@ -171,6 +171,7 @@ static void free_page_cgroup(void *addr)
                        sizeof(struct page_cgroup) * PAGES_PER_SECTION;
 
                BUG_ON(PageReserved(page));
+               kmemleak_free(addr);
                free_pages_exact(addr, table_size);
        }
 }
index 116a5053415b8dbc41c39622316e6b8b417a3727..19886fb2f13aac6a659ba1ae8b9e4db2f8efa7a4 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1042,15 +1042,46 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
-       bool locked;
+       struct mem_cgroup *memcg;
        unsigned long flags;
+       bool locked;
 
-       mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+       memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
        if (atomic_inc_and_test(&page->_mapcount)) {
                __inc_zone_page_state(page, NR_FILE_MAPPED);
-               mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+               mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
        }
-       mem_cgroup_end_update_page_stat(page, &locked, &flags);
+       mem_cgroup_end_page_stat(memcg, locked, flags);
+}
+
+static void page_remove_file_rmap(struct page *page)
+{
+       struct mem_cgroup *memcg;
+       unsigned long flags;
+       bool locked;
+
+       memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+
+       /* page still mapped by someone else? */
+       if (!atomic_add_negative(-1, &page->_mapcount))
+               goto out;
+
+       /* Hugepages are not counted in NR_FILE_MAPPED for now. */
+       if (unlikely(PageHuge(page)))
+               goto out;
+
+       /*
+        * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+        * these counters are not modified in interrupt context, and
+        * pte lock(a spinlock) is held, which implies preemption disabled.
+        */
+       __dec_zone_page_state(page, NR_FILE_MAPPED);
+       mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+
+       if (unlikely(PageMlocked(page)))
+               clear_page_mlock(page);
+out:
+       mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
 /**
@@ -1061,46 +1092,33 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
-       bool anon = PageAnon(page);
-       bool locked;
-       unsigned long flags;
-
-       /*
-        * The anon case has no mem_cgroup page_stat to update; but may
-        * uncharge_page() below, where the lock ordering can deadlock if
-        * we hold the lock against page_stat move: so avoid it on anon.
-        */
-       if (!anon)
-               mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+       if (!PageAnon(page)) {
+               page_remove_file_rmap(page);
+               return;
+       }
 
        /* page still mapped by someone else? */
        if (!atomic_add_negative(-1, &page->_mapcount))
-               goto out;
+               return;
+
+       /* Hugepages are not counted in NR_ANON_PAGES for now. */
+       if (unlikely(PageHuge(page)))
+               return;
 
        /*
-        * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
-        * and not charged by memcg for now.
-        *
         * We use the irq-unsafe __{inc|mod}_zone_page_stat because
         * these counters are not modified in interrupt context, and
-        * these counters are not modified in interrupt context, and
         * pte lock(a spinlock) is held, which implies preemption disabled.
         */
-       if (unlikely(PageHuge(page)))
-               goto out;
-       if (anon) {
-               if (PageTransHuge(page))
-                       __dec_zone_page_state(page,
-                                             NR_ANON_TRANSPARENT_HUGEPAGES);
-               __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-                               -hpage_nr_pages(page));
-       } else {
-               __dec_zone_page_state(page, NR_FILE_MAPPED);
-               mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
-               mem_cgroup_end_update_page_stat(page, &locked, &flags);
-       }
+       if (PageTransHuge(page))
+               __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+
+       __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
+                             -hpage_nr_pages(page));
+
        if (unlikely(PageMlocked(page)))
                clear_page_mlock(page);
+
        /*
         * It would be tidy to reset the PageAnon mapping here,
         * but that might overwrite a racing page_add_anon_rmap
@@ -1110,10 +1128,6 @@ void page_remove_rmap(struct page *page)
         * Leaving it set also helps swapoff to reinstate ptes
         * faster for those pages still in swapcache.
         */
-       return;
-out:
-       if (!anon)
-               mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
 /*
index 3a6e0cfdf03add7f19f4dc9ebdf7cc8bf6fad695..406944207b61dbd607bc7f2d2b244b6998f47254 100644 (file)
@@ -93,16 +93,6 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
                               s->object_size);
                        continue;
                }
-
-#if !defined(CONFIG_SLUB)
-               if (!strcmp(s->name, name)) {
-                       pr_err("%s (%s): Cache name already exists.\n",
-                              __func__, name);
-                       dump_stack();
-                       s = NULL;
-                       return -EINVAL;
-               }
-#endif
        }
 
        WARN_ON(strchr(name, ' '));     /* It confuses parsers */
index 6272420a721b0f01e15ffc572251a8f0794d5d8f..99815b5454bf4d68fdc36153951e434f7219b4d0 100644 (file)
@@ -6,7 +6,7 @@ menuconfig NET
        bool "Networking support"
        select NLATTR
        select GENERIC_NET_UTILS
-       select ANON_INODES
+       select BPF
        ---help---
          Unless you really know what you are doing, you should say Y here.
          The reason is that some programs need kernel networking support even
index 992ec49a96aa7e289bd3c74bca9a606762c63614..44cb786b925a2e857633b47b8ce3deaa22c6a5d8 100644 (file)
@@ -112,6 +112,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 
        kfree_skb(skb);
 }
+EXPORT_SYMBOL_GPL(br_deliver);
 
 /* called with rcu_read_lock */
 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
index 1bada53bb195ca4c35950af943e049a8ddf8347a..1a4f32c09ad51707c063f6eebe14abe747ed0d26 100644 (file)
@@ -192,7 +192,6 @@ static inline void nf_bridge_save_header(struct sk_buff *skb)
 
 static int br_parse_ip_options(struct sk_buff *skb)
 {
-       struct ip_options *opt;
        const struct iphdr *iph;
        struct net_device *dev = skb->dev;
        u32 len;
@@ -201,7 +200,6 @@ static int br_parse_ip_options(struct sk_buff *skb)
                goto inhdr_error;
 
        iph = ip_hdr(skb);
-       opt = &(IPCB(skb)->opt);
 
        /* Basic sanity checks */
        if (iph->ihl < 5 || iph->version != 4)
@@ -227,23 +225,11 @@ static int br_parse_ip_options(struct sk_buff *skb)
        }
 
        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
-       if (iph->ihl == 5)
-               return 0;
-
-       opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
-       if (ip_options_compile(dev_net(dev), opt, skb))
-               goto inhdr_error;
-
-       /* Check correct handling of SRR option */
-       if (unlikely(opt->srr)) {
-               struct in_device *in_dev = __in_dev_get_rcu(dev);
-               if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
-                       goto drop;
-
-               if (ip_options_rcv_srr(skb))
-                       goto drop;
-       }
-
+       /* We should really parse IP options here but until
+        * somebody who actually uses IP options complains to
+        * us we'll just silently ignore the options because
+        * we're lazy!
+        */
        return 0;
 
 inhdr_error:
index da17a5eab8b40e5df3eeafcef74934534eb26dd1..074c557ab505eb75bd0970a3045a78f0ca5c85dd 100644 (file)
@@ -75,9 +75,11 @@ static const struct nf_chain_type filter_bridge = {
        .type           = NFT_CHAIN_T_DEFAULT,
        .family         = NFPROTO_BRIDGE,
        .owner          = THIS_MODULE,
-       .hook_mask      = (1 << NF_BR_LOCAL_IN) |
+       .hook_mask      = (1 << NF_BR_PRE_ROUTING) |
+                         (1 << NF_BR_LOCAL_IN) |
                          (1 << NF_BR_FORWARD) |
-                         (1 << NF_BR_LOCAL_OUT),
+                         (1 << NF_BR_LOCAL_OUT) |
+                         (1 << NF_BR_POST_ROUTING),
 };
 
 static int __init nf_tables_bridge_init(void)
index a76479535df2a5005968c18422d999aa897b3556..654c9018e3e75a5f0bd01ba2197a7777e8626bf5 100644 (file)
 #include <net/netfilter/nft_reject.h>
 #include <net/netfilter/ipv4/nf_reject.h>
 #include <net/netfilter/ipv6/nf_reject.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/netfilter_bridge.h>
+#include "../br_private.h"
+
+static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
+                                       struct sk_buff *nskb)
+{
+       struct ethhdr *eth;
+
+       eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
+       skb_reset_mac_header(nskb);
+       ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
+       ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
+       eth->h_proto = eth_hdr(oldskb)->h_proto;
+       skb_pull(nskb, ETH_HLEN);
+}
+
+static int nft_reject_iphdr_validate(struct sk_buff *oldskb)
+{
+       struct iphdr *iph;
+       u32 len;
+
+       if (!pskb_may_pull(oldskb, sizeof(struct iphdr)))
+               return 0;
+
+       iph = ip_hdr(oldskb);
+       if (iph->ihl < 5 || iph->version != 4)
+               return 0;
+
+       len = ntohs(iph->tot_len);
+       if (oldskb->len < len)
+               return 0;
+       else if (len < (iph->ihl*4))
+               return 0;
+
+       if (!pskb_may_pull(oldskb, iph->ihl*4))
+               return 0;
+
+       return 1;
+}
+
+static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
+{
+       struct sk_buff *nskb;
+       struct iphdr *niph;
+       const struct tcphdr *oth;
+       struct tcphdr _oth;
+
+       if (!nft_reject_iphdr_validate(oldskb))
+               return;
+
+       oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
+       if (!oth)
+               return;
+
+       nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
+                        LL_MAX_HEADER, GFP_ATOMIC);
+       if (!nskb)
+               return;
+
+       skb_reserve(nskb, LL_MAX_HEADER);
+       niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+                                  sysctl_ip_default_ttl);
+       nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
+       niph->ttl       = sysctl_ip_default_ttl;
+       niph->tot_len   = htons(nskb->len);
+       ip_send_check(niph);
+
+       nft_reject_br_push_etherhdr(oldskb, nskb);
+
+       br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
+
+static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
+                                         u8 code)
+{
+       struct sk_buff *nskb;
+       struct iphdr *niph;
+       struct icmphdr *icmph;
+       unsigned int len;
+       void *payload;
+       __wsum csum;
+
+       if (!nft_reject_iphdr_validate(oldskb))
+               return;
+
+       /* IP header checks: fragment. */
+       if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
+               return;
+
+       /* RFC says return as much as we can without exceeding 576 bytes. */
+       len = min_t(unsigned int, 536, oldskb->len);
+
+       if (!pskb_may_pull(oldskb, len))
+               return;
+
+       if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
+               return;
+
+       nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
+                        LL_MAX_HEADER + len, GFP_ATOMIC);
+       if (!nskb)
+               return;
+
+       skb_reserve(nskb, LL_MAX_HEADER);
+       niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
+                                  sysctl_ip_default_ttl);
+
+       skb_reset_transport_header(nskb);
+       icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
+       memset(icmph, 0, sizeof(*icmph));
+       icmph->type     = ICMP_DEST_UNREACH;
+       icmph->code     = code;
+
+       payload = skb_put(nskb, len);
+       memcpy(payload, skb_network_header(oldskb), len);
+
+       csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
+       icmph->checksum = csum_fold(csum);
+
+       niph->tot_len   = htons(nskb->len);
+       ip_send_check(niph);
+
+       nft_reject_br_push_etherhdr(oldskb, nskb);
+
+       br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
+
+static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb)
+{
+       struct ipv6hdr *hdr;
+       u32 pkt_len;
+
+       if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr)))
+               return 0;
+
+       hdr = ipv6_hdr(oldskb);
+       if (hdr->version != 6)
+               return 0;
+
+       pkt_len = ntohs(hdr->payload_len);
+       if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len)
+               return 0;
+
+       return 1;
+}
+
+static void nft_reject_br_send_v6_tcp_reset(struct net *net,
+                                           struct sk_buff *oldskb, int hook)
+{
+       struct sk_buff *nskb;
+       const struct tcphdr *oth;
+       struct tcphdr _oth;
+       unsigned int otcplen;
+       struct ipv6hdr *nip6h;
+
+       if (!nft_reject_ip6hdr_validate(oldskb))
+               return;
+
+       oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
+       if (!oth)
+               return;
+
+       nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
+                        LL_MAX_HEADER, GFP_ATOMIC);
+       if (!nskb)
+               return;
+
+       skb_reserve(nskb, LL_MAX_HEADER);
+       nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
+                                    net->ipv6.devconf_all->hop_limit);
+       nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
+       nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
+
+       nft_reject_br_push_etherhdr(oldskb, nskb);
+
+       br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
+
+static void nft_reject_br_send_v6_unreach(struct net *net,
+                                         struct sk_buff *oldskb, int hook,
+                                         u8 code)
+{
+       struct sk_buff *nskb;
+       struct ipv6hdr *nip6h;
+       struct icmp6hdr *icmp6h;
+       unsigned int len;
+       void *payload;
+
+       if (!nft_reject_ip6hdr_validate(oldskb))
+               return;
+
+       /* Include "As much of invoking packet as possible without the ICMPv6
+        * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
+        */
+       len = min_t(unsigned int, 1220, oldskb->len);
+
+       if (!pskb_may_pull(oldskb, len))
+               return;
+
+       nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
+                        LL_MAX_HEADER + len, GFP_ATOMIC);
+       if (!nskb)
+               return;
+
+       skb_reserve(nskb, LL_MAX_HEADER);
+       nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
+                                    net->ipv6.devconf_all->hop_limit);
+
+       skb_reset_transport_header(nskb);
+       icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
+       memset(icmp6h, 0, sizeof(*icmp6h));
+       icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
+       icmp6h->icmp6_code = code;
+
+       payload = skb_put(nskb, len);
+       memcpy(payload, skb_network_header(oldskb), len);
+       nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
+
+       icmp6h->icmp6_cksum =
+               csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
+                               nskb->len - sizeof(struct ipv6hdr),
+                               IPPROTO_ICMPV6,
+                               csum_partial(icmp6h,
+                                            nskb->len - sizeof(struct ipv6hdr),
+                                            0));
+
+       nft_reject_br_push_etherhdr(oldskb, nskb);
+
+       br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
 
 static void nft_reject_bridge_eval(const struct nft_expr *expr,
                                 struct nft_data data[NFT_REG_MAX + 1],
@@ -23,35 +255,46 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
 {
        struct nft_reject *priv = nft_expr_priv(expr);
        struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
+       const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
+
+       if (is_broadcast_ether_addr(dest) ||
+           is_multicast_ether_addr(dest))
+               goto out;
 
        switch (eth_hdr(pkt->skb)->h_proto) {
        case htons(ETH_P_IP):
                switch (priv->type) {
                case NFT_REJECT_ICMP_UNREACH:
-                       nf_send_unreach(pkt->skb, priv->icmp_code);
+                       nft_reject_br_send_v4_unreach(pkt->skb,
+                                                     pkt->ops->hooknum,
+                                                     priv->icmp_code);
                        break;
                case NFT_REJECT_TCP_RST:
-                       nf_send_reset(pkt->skb, pkt->ops->hooknum);
+                       nft_reject_br_send_v4_tcp_reset(pkt->skb,
+                                                       pkt->ops->hooknum);
                        break;
                case NFT_REJECT_ICMPX_UNREACH:
-                       nf_send_unreach(pkt->skb,
-                                       nft_reject_icmp_code(priv->icmp_code));
+                       nft_reject_br_send_v4_unreach(pkt->skb,
+                                                     pkt->ops->hooknum,
+                                                     nft_reject_icmp_code(priv->icmp_code));
                        break;
                }
                break;
        case htons(ETH_P_IPV6):
                switch (priv->type) {
                case NFT_REJECT_ICMP_UNREACH:
-                       nf_send_unreach6(net, pkt->skb, priv->icmp_code,
-                                        pkt->ops->hooknum);
+                       nft_reject_br_send_v6_unreach(net, pkt->skb,
+                                                     pkt->ops->hooknum,
+                                                     priv->icmp_code);
                        break;
                case NFT_REJECT_TCP_RST:
-                       nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
+                       nft_reject_br_send_v6_tcp_reset(net, pkt->skb,
+                                                       pkt->ops->hooknum);
                        break;
                case NFT_REJECT_ICMPX_UNREACH:
-                       nf_send_unreach6(net, pkt->skb,
-                                        nft_reject_icmpv6_code(priv->icmp_code),
-                                        pkt->ops->hooknum);
+                       nft_reject_br_send_v6_unreach(net, pkt->skb,
+                                                     pkt->ops->hooknum,
+                                                     nft_reject_icmpv6_code(priv->icmp_code));
                        break;
                }
                break;
@@ -59,15 +302,38 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
                /* No explicit way to reject this protocol, drop it. */
                break;
        }
+out:
        data[NFT_REG_VERDICT].verdict = NF_DROP;
 }
 
+static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain)
+{
+       struct nft_base_chain *basechain;
+
+       if (chain->flags & NFT_BASE_CHAIN) {
+               basechain = nft_base_chain(chain);
+
+               switch (basechain->ops[0].hooknum) {
+               case NF_BR_PRE_ROUTING:
+               case NF_BR_LOCAL_IN:
+                       break;
+               default:
+                       return -EOPNOTSUPP;
+               }
+       }
+       return 0;
+}
+
 static int nft_reject_bridge_init(const struct nft_ctx *ctx,
                                  const struct nft_expr *expr,
                                  const struct nlattr * const tb[])
 {
        struct nft_reject *priv = nft_expr_priv(expr);
-       int icmp_code;
+       int icmp_code, err;
+
+       err = nft_reject_bridge_validate_hooks(ctx->chain);
+       if (err < 0)
+               return err;
 
        if (tb[NFTA_REJECT_TYPE] == NULL)
                return -EINVAL;
@@ -116,6 +382,13 @@ nla_put_failure:
        return -1;
 }
 
+static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
+                                     const struct nft_expr *expr,
+                                     const struct nft_data **data)
+{
+       return nft_reject_bridge_validate_hooks(ctx->chain);
+}
+
 static struct nft_expr_type nft_reject_bridge_type;
 static const struct nft_expr_ops nft_reject_bridge_ops = {
        .type           = &nft_reject_bridge_type,
@@ -123,6 +396,7 @@ static const struct nft_expr_ops nft_reject_bridge_ops = {
        .eval           = nft_reject_bridge_eval,
        .init           = nft_reject_bridge_init,
        .dump           = nft_reject_bridge_dump,
+       .validate       = nft_reject_bridge_validate,
 };
 
 static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
index b793e3521a3631319bf4d0e7c17c0c9a933331da..945bbd0013592634be3bc840544d8b25919ea11d 100644 (file)
@@ -4157,6 +4157,10 @@ EXPORT_SYMBOL(napi_gro_receive);
 
 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 {
+       if (unlikely(skb->pfmemalloc)) {
+               consume_skb(skb);
+               return;
+       }
        __skb_pull(skb, skb_headlen(skb));
        /* restore the reserve we had after netdev_alloc_skb_ip_align() */
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
index 1600aa24d36bcaa653dff0b0c34f4d700eb4fbb9..06dfb293e5aafa10ab2754bfb80d29ad29f2db95 100644 (file)
@@ -1036,7 +1036,8 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
 {
        const struct ethtool_ops *ops = dev->ethtool_ops;
 
-       if (!ops->get_eeprom || !ops->get_eeprom_len)
+       if (!ops->get_eeprom || !ops->get_eeprom_len ||
+           !ops->get_eeprom_len(dev))
                return -EOPNOTSUPP;
 
        return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom,
@@ -1052,7 +1053,8 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
        u8 *data;
        int ret = 0;
 
-       if (!ops->set_eeprom || !ops->get_eeprom_len)
+       if (!ops->set_eeprom || !ops->get_eeprom_len ||
+           !ops->get_eeprom_len(dev))
                return -EOPNOTSUPP;
 
        if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
index 61059a05ec95f1fdc420e7bbb250d1a8b22c87b0..c16615bfb61edd2a1dae9ef7935a3153d78dc4df 100644 (file)
@@ -4070,15 +4070,22 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
 {
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
+       unsigned int thlen = 0;
 
-       if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
-               return tcp_hdrlen(skb) + shinfo->gso_size;
+       if (skb->encapsulation) {
+               thlen = skb_inner_transport_header(skb) -
+                       skb_transport_header(skb);
 
+               if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+                       thlen += inner_tcp_hdrlen(skb);
+       } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+               thlen = tcp_hdrlen(skb);
+       }
        /* UFO sets gso_size to the size of the fragmentation
         * payload, i.e. the size of the L4 (UDP) header is already
         * accounted for.
         */
-       return shinfo->gso_size;
+       return thlen + shinfo->gso_size;
 }
 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
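A worked example of the encapsulation branch above (illustrative sizes, not taken from the patch): for a VXLAN-tunnelled TCPv4 GSO skb the gap between the outer and inner transport headers covers the encapsulation headers, so

	/* outer UDP (8) + VXLAN (8) + inner Ethernet (14) + inner IPv4 (20) = 50
	 * thlen  = 50 + inner TCP header (20)  = 70
	 * seglen = thlen + gso_size (1400)     = 1470 bytes per segment
	 */

whereas the old code derived the header length from the outer transport header alone.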
 
index 8c3203c585b0c116e7d8ea8f66feeda7c4a73452..630b30b4fb5368332428289e449b3247be0cd97b 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/export.h>
 #include <net/ip.h>
 #include <net/tso.h>
+#include <asm/unaligned.h>
 
 /* Calculate expected number of TX descriptors */
 int tso_count_descs(struct sk_buff *skb)
@@ -23,7 +24,7 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
        iph->id = htons(tso->ip_id);
        iph->tot_len = htons(size + hdr_len - mac_hdr_len);
        tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
-       tcph->seq = htonl(tso->tcp_seq);
+       put_unaligned_be32(tso->tcp_seq, &tcph->seq);
        tso->ip_id++;
 
        if (!is_last) {
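The crafted TSO header lives in a driver-supplied buffer, so the TCP sequence field may not sit on a 4-byte boundary; put_unaligned_be32() does the byte-order conversion and stores the value safely on strict-alignment CPUs. A minimal user-space analogue of the helper (a sketch, not the kernel implementation):

	/* store a 32-bit value big-endian at a possibly unaligned address */
	static inline void put_be32(unsigned int val, unsigned char *p)
	{
		p[0] = val >> 24;
		p[1] = val >> 16;
		p[2] = val >> 8;
		p[3] = val;
	}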
index 22f34cf4cb27d1aad06dcb41a1f6d84d4dffd688..6317b41c99b0a58bb82ede4a1205d8c3e7f94283 100644 (file)
@@ -174,8 +174,11 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
                        dst->rcv = brcm_netdev_ops.rcv;
                        break;
 #endif
-               default:
+               case DSA_TAG_PROTO_NONE:
                        break;
+               default:
+                       ret = -ENOPROTOOPT;
+                       goto out;
                }
 
                dst->tag_protocol = drv->tag_protocol;
index 92db7a69f2b9d106d7d235388719d3a476eb91d5..8b7fe5b039068559a931b931529f02841cc13fe2 100644 (file)
@@ -1246,7 +1246,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 
        encap = SKB_GSO_CB(skb)->encap_level > 0;
        if (encap)
-               features = skb->dev->hw_enc_features & netif_skb_features(skb);
+               features &= skb->dev->hw_enc_features;
        SKB_GSO_CB(skb)->encap_level += ihl;
 
        skb_reset_transport_header(skb);
index ccda09628de7e74f0f4f123bc3834afd12fd69a8..bb5947b0ce2dfbd94a0b85a13983c060b2984075 100644 (file)
@@ -47,7 +47,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 
        greh = (struct gre_base_hdr *)skb_transport_header(skb);
 
-       ghl = skb_inner_network_header(skb) - skb_transport_header(skb);
+       ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
        if (unlikely(ghl < sizeof(*greh)))
                goto out;
 
@@ -68,7 +68,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
        skb->mac_len = skb_inner_network_offset(skb);
 
        /* segment inner packet. */
-       enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+       enc_features = skb->dev->hw_enc_features & features;
        segs = skb_mac_gso_segment(skb, enc_features);
        if (IS_ERR_OR_NULL(segs)) {
                skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
index 9eb89f3f0ee4cb88bf065956c45453999d799aa8..19419b60cb37d1c59c8238b49eab77d87ff0b4a1 100644 (file)
@@ -146,7 +146,6 @@ evict_again:
                        atomic_inc(&fq->refcnt);
                        spin_unlock(&hb->chain_lock);
                        del_timer_sync(&fq->timer);
-                       WARN_ON(atomic_read(&fq->refcnt) != 1);
                        inet_frag_put(fq, f);
                        goto evict_again;
                }
@@ -285,7 +284,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
        struct inet_frag_bucket *hb;
 
        hb = get_frag_bucket_locked(fq, f);
-       hlist_del(&fq->list);
+       if (!(fq->flags & INET_FRAG_EVICTED))
+               hlist_del(&fq->list);
        spin_unlock(&hb->chain_lock);
 }
 
index 88e5ef2c7f511fe75616ac2d70fdda22c9ac8b5d..bc6471d4abcdaa464d9effc244c5f1bdcb989681 100644 (file)
@@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
         */
        features = netif_skb_features(skb);
        segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-       if (IS_ERR(segs)) {
+       if (IS_ERR_OR_NULL(segs)) {
                kfree_skb(skb);
                return -ENOMEM;
        }
index b023b4eb1a966415c1ae43582428566036379d7a..1baaa83dfe5ce63b8ed7cb8fb6e9e3798671e4de 100644 (file)
@@ -6,48 +6,45 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/module.h>
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/route.h>
 #include <net/dst.h>
 #include <linux/netfilter_ipv4.h>
+#include <net/netfilter/ipv4/nf_reject.h>
 
-/* Send RST reply */
-void nf_send_reset(struct sk_buff *oldskb, int hook)
+const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
+                                            struct tcphdr *_oth, int hook)
 {
-       struct sk_buff *nskb;
-       const struct iphdr *oiph;
-       struct iphdr *niph;
        const struct tcphdr *oth;
-       struct tcphdr _otcph, *tcph;
 
        /* IP header checks: fragment. */
        if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
-               return;
+               return NULL;
 
        oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
-                                sizeof(_otcph), &_otcph);
+                                sizeof(struct tcphdr), _oth);
        if (oth == NULL)
-               return;
+               return NULL;
 
        /* No RST for RST. */
        if (oth->rst)
-               return;
-
-       if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
-               return;
+               return NULL;
 
        /* Check checksum */
        if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
-               return;
-       oiph = ip_hdr(oldskb);
+               return NULL;
 
-       nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
-                        LL_MAX_HEADER, GFP_ATOMIC);
-       if (!nskb)
-               return;
+       return oth;
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get);
 
-       skb_reserve(nskb, LL_MAX_HEADER);
+struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
+                                 const struct sk_buff *oldskb,
+                                 __be16 protocol, int ttl)
+{
+       struct iphdr *niph, *oiph = ip_hdr(oldskb);
 
        skb_reset_network_header(nskb);
        niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
@@ -56,10 +53,23 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
        niph->tos       = 0;
        niph->id        = 0;
        niph->frag_off  = htons(IP_DF);
-       niph->protocol  = IPPROTO_TCP;
+       niph->protocol  = protocol;
        niph->check     = 0;
        niph->saddr     = oiph->daddr;
        niph->daddr     = oiph->saddr;
+       niph->ttl       = ttl;
+
+       nskb->protocol = htons(ETH_P_IP);
+
+       return niph;
+}
+EXPORT_SYMBOL_GPL(nf_reject_iphdr_put);
+
+void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
+                         const struct tcphdr *oth)
+{
+       struct iphdr *niph = ip_hdr(nskb);
+       struct tcphdr *tcph;
 
        skb_reset_transport_header(nskb);
        tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
@@ -68,9 +78,9 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
        tcph->dest      = oth->source;
        tcph->doff      = sizeof(struct tcphdr) / 4;
 
-       if (oth->ack)
+       if (oth->ack) {
                tcph->seq = oth->ack_seq;
-       else {
+       } else {
                tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
                                      oldskb->len - ip_hdrlen(oldskb) -
                                      (oth->doff << 2));
@@ -83,16 +93,43 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
        nskb->ip_summed = CHECKSUM_PARTIAL;
        nskb->csum_start = (unsigned char *)tcph - nskb->head;
        nskb->csum_offset = offsetof(struct tcphdr, check);
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
+
+/* Send RST reply */
+void nf_send_reset(struct sk_buff *oldskb, int hook)
+{
+       struct sk_buff *nskb;
+       const struct iphdr *oiph;
+       struct iphdr *niph;
+       const struct tcphdr *oth;
+       struct tcphdr _oth;
+
+       oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
+       if (!oth)
+               return;
+
+       if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+               return;
+
+       oiph = ip_hdr(oldskb);
+
+       nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
+                        LL_MAX_HEADER, GFP_ATOMIC);
+       if (!nskb)
+               return;
 
        /* ip_route_me_harder expects skb->dst to be set */
        skb_dst_set_noref(nskb, skb_dst(oldskb));
 
-       nskb->protocol = htons(ETH_P_IP);
+       skb_reserve(nskb, LL_MAX_HEADER);
+       niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+                                  ip4_dst_hoplimit(skb_dst(nskb)));
+       nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
+
        if (ip_route_me_harder(nskb, RTN_UNSPEC))
                goto free_nskb;
 
-       niph->ttl       = ip4_dst_hoplimit(skb_dst(nskb));
-
        /* "Never happens" */
        if (nskb->len > dst_mtu(skb_dst(nskb)))
                goto free_nskb;
@@ -125,3 +162,5 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
        kfree_skb(nskb);
 }
 EXPORT_SYMBOL_GPL(nf_send_reset);
+
+MODULE_LICENSE("GPL");
index 1c636d6b5b5007b597e95b7852d7f76d750a4040..c1023c4459201ad63394606cab4b42fae61257aa 100644 (file)
@@ -39,6 +39,7 @@ static const struct nft_expr_ops nft_masq_ipv4_ops = {
        .eval           = nft_masq_ipv4_eval,
        .init           = nft_masq_init,
        .dump           = nft_masq_dump,
+       .validate       = nft_masq_validate,
 };
 
 static struct nft_expr_type nft_masq_ipv4_type __read_mostly = {
index 2d4ae469b471a4fff941e0dd13d014dfd222f6a8..6a2155b02602b100c7ce3bbfda28a38090add7a4 100644 (file)
@@ -1798,6 +1798,7 @@ local_input:
 no_route:
        RT_CACHE_STAT_INC(in_no_route);
        res.type = RTN_UNREACHABLE;
+       res.fi = NULL;
        goto local_input;
 
        /*
index 1bec4e76d88c5852d8ba3392b22aa58d6453ab4d..39ec0c379545afe07d6016c9180e9c408d22d4df 100644 (file)
@@ -2868,61 +2868,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
+static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
 static DEFINE_MUTEX(tcp_md5sig_mutex);
-
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
-
-               if (p->md5_desc.tfm)
-                       crypto_free_hash(p->md5_desc.tfm);
-       }
-       free_percpu(pool);
-}
+static bool tcp_md5sig_pool_populated = false;
 
 static void __tcp_alloc_md5sig_pool(void)
 {
        int cpu;
-       struct tcp_md5sig_pool __percpu *pool;
-
-       pool = alloc_percpu(struct tcp_md5sig_pool);
-       if (!pool)
-               return;
 
        for_each_possible_cpu(cpu) {
-               struct crypto_hash *hash;
-
-               hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-               if (IS_ERR_OR_NULL(hash))
-                       goto out_free;
+               if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
+                       struct crypto_hash *hash;
 
-               per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
+                       hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+                       if (IS_ERR_OR_NULL(hash))
+                               return;
+                       per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
+               }
        }
-       /* before setting tcp_md5sig_pool, we must commit all writes
-        * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
+       /* before setting tcp_md5sig_pool_populated, we must commit all writes
+        * to memory. See smp_rmb() in tcp_get_md5sig_pool()
         */
        smp_wmb();
-       tcp_md5sig_pool = pool;
-       return;
-out_free:
-       __tcp_free_md5sig_pool(pool);
+       tcp_md5sig_pool_populated = true;
 }
 
 bool tcp_alloc_md5sig_pool(void)
 {
-       if (unlikely(!tcp_md5sig_pool)) {
+       if (unlikely(!tcp_md5sig_pool_populated)) {
                mutex_lock(&tcp_md5sig_mutex);
 
-               if (!tcp_md5sig_pool)
+               if (!tcp_md5sig_pool_populated)
                        __tcp_alloc_md5sig_pool();
 
                mutex_unlock(&tcp_md5sig_mutex);
        }
-       return tcp_md5sig_pool != NULL;
+       return tcp_md5sig_pool_populated;
 }
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
@@ -2936,13 +2917,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
  */
 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
-       struct tcp_md5sig_pool __percpu *p;
-
        local_bh_disable();
-       p = ACCESS_ONCE(tcp_md5sig_pool);
-       if (p)
-               return raw_cpu_ptr(p);
 
+       if (tcp_md5sig_pool_populated) {
+               /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
+               smp_rmb();
+               return this_cpu_ptr(&tcp_md5sig_pool);
+       }
        local_bh_enable();
        return NULL;
 }
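The rewrite above is the classic publish/consume pairing: every per-cpu transform is set up first, smp_wmb() orders those stores before the flag store, and a reader that sees the flag set issues smp_rmb() before touching the pool. A generic sketch of the pattern (illustration only, not the kernel code itself):

	/* writer */
	setup_data();
	smp_wmb();          /* order the data stores before the flag store */
	flag = true;

	/* reader */
	if (flag) {
		smp_rmb();  /* pairs with the smp_wmb() above */
		use_data();
	}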
index 94d1a7757ff7462edf6c32406df7187b4839bf4f..9c7d7621466b1241f404a5ca11de809dcff2d02a 100644 (file)
@@ -206,8 +206,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        inet->inet_dport = usin->sin_port;
        inet->inet_daddr = daddr;
 
-       inet_set_txhash(sk);
-
        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
@@ -224,6 +222,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        if (err)
                goto failure;
 
+       inet_set_txhash(sk);
+
        rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
        if (IS_ERR(rt)) {
index 3af21296d96788b899daaa25562301e38036e802..a3d453b94747c22dd0fad07dfe606fdd11c414b1 100644 (file)
@@ -2126,7 +2126,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 static bool skb_still_in_host_queue(const struct sock *sk,
                                    const struct sk_buff *skb)
 {
-       if (unlikely(skb_fclone_busy(skb))) {
+       if (unlikely(skb_fclone_busy(sk, skb))) {
                NET_INC_STATS_BH(sock_net(sk),
                                 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
                return true;
index 507310ef4b5681bd8e59ba779e0560ecc3e5d04e..6480cea7aa53a6df85a970279b82e3b9ed5a33e6 100644 (file)
@@ -58,7 +58,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
                skb->encap_hdr_csum = 1;
 
        /* segment inner packet. */
-       enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+       enc_features = skb->dev->hw_enc_features & features;
        segs = gso_inner_segment(skb, enc_features);
        if (IS_ERR_OR_NULL(segs)) {
                skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
index 725c763270a067e3945ec3056c4f5893cf9f7060..0169ccf5aa4fec8f691ea3bed77b4c7e5587d2a5 100644 (file)
@@ -4531,6 +4531,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
        }
 
        write_unlock_bh(&idev->lock);
+       inet6_ifinfo_notify(RTM_NEWLINK, idev);
        addrconf_verify_rtnl();
        return 0;
 }
index 91014d32488ded4a7eeb0e00d27ee925bc48f8da..a071563a7e6e9c1f6d0fa9d8490c1d35ce89b3f7 100644 (file)
@@ -90,7 +90,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
        encap = SKB_GSO_CB(skb)->encap_level > 0;
        if (encap)
-               features = skb->dev->hw_enc_features & netif_skb_features(skb);
+               features &= skb->dev->hw_enc_features;
        SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);
 
        ipv6h = ipv6_hdr(skb);
index 5f5f0438d74d9b76596b1e5633189402c5925d80..015eb8a80766f477ed797d496762f4fb2e6f7661 100644 (file)
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
+#include <linux/module.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
 #include <net/ip6_fib.h>
 #include <net/ip6_checksum.h>
 #include <linux/netfilter_ipv6.h>
+#include <net/netfilter/ipv6/nf_reject.h>
 
-void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
+const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
+                                             struct tcphdr *otcph,
+                                             unsigned int *otcplen, int hook)
 {
-       struct sk_buff *nskb;
-       struct tcphdr otcph, *tcph;
-       unsigned int otcplen, hh_len;
-       int tcphoff, needs_ack;
        const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
-       struct ipv6hdr *ip6h;
-#define DEFAULT_TOS_VALUE      0x0U
-       const __u8 tclass = DEFAULT_TOS_VALUE;
-       struct dst_entry *dst = NULL;
        u8 proto;
        __be16 frag_off;
-       struct flowi6 fl6;
-
-       if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
-           (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
-               pr_debug("addr is not unicast.\n");
-               return;
-       }
+       int tcphoff;
 
        proto = oip6h->nexthdr;
-       tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);
+       tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data),
+                                  &proto, &frag_off);
 
        if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
                pr_debug("Cannot get TCP header.\n");
-               return;
+               return NULL;
        }
 
-       otcplen = oldskb->len - tcphoff;
+       *otcplen = oldskb->len - tcphoff;
 
        /* IP header checks: fragment, too short. */
-       if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
-               pr_debug("proto(%d) != IPPROTO_TCP, "
-                        "or too short. otcplen = %d\n",
-                        proto, otcplen);
-               return;
+       if (proto != IPPROTO_TCP || *otcplen < sizeof(struct tcphdr)) {
+               pr_debug("proto(%d) != IPPROTO_TCP or too short (len = %d)\n",
+                        proto, *otcplen);
+               return NULL;
        }
 
-       if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
-               BUG();
+       otcph = skb_header_pointer(oldskb, tcphoff, sizeof(struct tcphdr),
+                                  otcph);
+       if (otcph == NULL)
+               return NULL;
 
        /* No RST for RST. */
-       if (otcph.rst) {
+       if (otcph->rst) {
                pr_debug("RST is set\n");
-               return;
+               return NULL;
        }
 
        /* Check checksum. */
        if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
                pr_debug("TCP checksum is invalid\n");
-               return;
+               return NULL;
        }
 
-       memset(&fl6, 0, sizeof(fl6));
-       fl6.flowi6_proto = IPPROTO_TCP;
-       fl6.saddr = oip6h->daddr;
-       fl6.daddr = oip6h->saddr;
-       fl6.fl6_sport = otcph.dest;
-       fl6.fl6_dport = otcph.source;
-       security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
-       dst = ip6_route_output(net, NULL, &fl6);
-       if (dst == NULL || dst->error) {
-               dst_release(dst);
-               return;
-       }
-       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
-       if (IS_ERR(dst))
-               return;
-
-       hh_len = (dst->dev->hard_header_len + 15)&~15;
-       nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
-                        + sizeof(struct tcphdr) + dst->trailer_len,
-                        GFP_ATOMIC);
-
-       if (!nskb) {
-               net_dbg_ratelimited("cannot alloc skb\n");
-               dst_release(dst);
-               return;
-       }
-
-       skb_dst_set(nskb, dst);
+       return otcph;
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get);
 
-       skb_reserve(nskb, hh_len + dst->header_len);
+struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
+                                    const struct sk_buff *oldskb,
+                                    __be16 protocol, int hoplimit)
+{
+       struct ipv6hdr *ip6h;
+       const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
+#define DEFAULT_TOS_VALUE      0x0U
+       const __u8 tclass = DEFAULT_TOS_VALUE;
 
        skb_put(nskb, sizeof(struct ipv6hdr));
        skb_reset_network_header(nskb);
        ip6h = ipv6_hdr(nskb);
        ip6_flow_hdr(ip6h, tclass, 0);
-       ip6h->hop_limit = ip6_dst_hoplimit(dst);
-       ip6h->nexthdr = IPPROTO_TCP;
+       ip6h->hop_limit = hoplimit;
+       ip6h->nexthdr = protocol;
        ip6h->saddr = oip6h->daddr;
        ip6h->daddr = oip6h->saddr;
 
+       nskb->protocol = htons(ETH_P_IPV6);
+
+       return ip6h;
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip6hdr_put);
+
+void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
+                             const struct sk_buff *oldskb,
+                             const struct tcphdr *oth, unsigned int otcplen)
+{
+       struct tcphdr *tcph;
+       int needs_ack;
+
        skb_reset_transport_header(nskb);
        tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
        /* Truncate to length (no data) */
        tcph->doff = sizeof(struct tcphdr)/4;
-       tcph->source = otcph.dest;
-       tcph->dest = otcph.source;
+       tcph->source = oth->dest;
+       tcph->dest = oth->source;
 
-       if (otcph.ack) {
+       if (oth->ack) {
                needs_ack = 0;
-               tcph->seq = otcph.ack_seq;
+               tcph->seq = oth->ack_seq;
                tcph->ack_seq = 0;
        } else {
                needs_ack = 1;
-               tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
-                                     + otcplen - (otcph.doff<<2));
+               tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
+                                     otcplen - (oth->doff<<2));
                tcph->seq = 0;
        }
 
@@ -137,6 +125,63 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
                                      sizeof(struct tcphdr), IPPROTO_TCP,
                                      csum_partial(tcph,
                                                   sizeof(struct tcphdr), 0));
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put);
+
+void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
+{
+       struct sk_buff *nskb;
+       struct tcphdr _otcph;
+       const struct tcphdr *otcph;
+       unsigned int otcplen, hh_len;
+       const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
+       struct ipv6hdr *ip6h;
+       struct dst_entry *dst = NULL;
+       struct flowi6 fl6;
+
+       if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
+           (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
+               pr_debug("addr is not unicast.\n");
+               return;
+       }
+
+       otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook);
+       if (!otcph)
+               return;
+
+       memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_TCP;
+       fl6.saddr = oip6h->daddr;
+       fl6.daddr = oip6h->saddr;
+       fl6.fl6_sport = otcph->dest;
+       fl6.fl6_dport = otcph->source;
+       security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
+       dst = ip6_route_output(net, NULL, &fl6);
+       if (dst == NULL || dst->error) {
+               dst_release(dst);
+               return;
+       }
+       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+       if (IS_ERR(dst))
+               return;
+
+       hh_len = (dst->dev->hard_header_len + 15)&~15;
+       nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
+                        + sizeof(struct tcphdr) + dst->trailer_len,
+                        GFP_ATOMIC);
+
+       if (!nskb) {
+               net_dbg_ratelimited("cannot alloc skb\n");
+               dst_release(dst);
+               return;
+       }
+
+       skb_dst_set(nskb, dst);
+
+       skb_reserve(nskb, hh_len + dst->header_len);
+       ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
+                                   ip6_dst_hoplimit(dst));
+       nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
 
        nf_ct_attach(nskb, oldskb);
 
@@ -161,3 +206,5 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
                ip6_local_out(nskb);
 }
 EXPORT_SYMBOL_GPL(nf_send_reset6);
+
+MODULE_LICENSE("GPL");
index 556262f407616ee3a20bb10888e02ca1570d73c0..8a7ac685076d91b18baf91e9ab386a98415ee009 100644 (file)
@@ -39,6 +39,7 @@ static const struct nft_expr_ops nft_masq_ipv6_ops = {
        .eval           = nft_masq_ipv6_eval,
        .init           = nft_masq_init,
        .dump           = nft_masq_dump,
+       .validate       = nft_masq_validate,
 };
 
 static struct nft_expr_type nft_masq_ipv6_type __read_mostly = {
index fc24c390af0541050782c76434ccbbf9e00c24f0..97f41a3e68d98b89d6b8f643780f8a883bd46f55 100644 (file)
@@ -3,11 +3,45 @@
  * not configured or static.  These functions are needed by GSO/GRO implementation.
  */
 #include <linux/export.h>
+#include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
 #include <net/addrconf.h>
 #include <net/secure_seq.h>
 
+/* This function exists only for tap drivers that must support broken
+ * clients requesting UFO without specifying an IPv6 fragment ID.
+ *
+ * This is similar to ipv6_select_ident() but we use an independent hash
+ * seed to limit information leakage.
+ *
+ * The network header must be set before calling this.
+ */
+void ipv6_proxy_select_ident(struct sk_buff *skb)
+{
+       static u32 ip6_proxy_idents_hashrnd __read_mostly;
+       struct in6_addr buf[2];
+       struct in6_addr *addrs;
+       u32 hash, id;
+
+       addrs = skb_header_pointer(skb,
+                                  skb_network_offset(skb) +
+                                  offsetof(struct ipv6hdr, saddr),
+                                  sizeof(buf), buf);
+       if (!addrs)
+               return;
+
+       net_get_random_once(&ip6_proxy_idents_hashrnd,
+                           sizeof(ip6_proxy_idents_hashrnd));
+
+       hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
+       hash = __ipv6_addr_jhash(&addrs[0], hash);
+
+       id = ip_idents_reserve(hash, 1);
+       skb_shinfo(skb)->ip6_frag_id = htonl(id);
+}
+EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
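Per the comment, this helper targets tap-style drivers that receive UFO packets without an IPv6 fragment ID. A hypothetical caller sketch (the call site and condition are assumptions for illustration, not part of this patch):

	/* tap driver receive path: pick a fragment ID for an IPv6 UFO skb */
	if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP) &&
	    skb->protocol == htons(ETH_P_IPV6))
		ipv6_proxy_select_ident(skb);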
+
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
        u16 offset = sizeof(struct ipv6hdr);
index 831495529b829c8ff4bc4ec3a8f0e7cf33b011b5..ace29b60813cf8a1d7182ad2262cbcbd21810fa7 100644 (file)
@@ -200,8 +200,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        sk->sk_v6_daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;
 
-       ip6_set_txhash(sk);
-
        /*
         *      TCP over IPv4
         */
@@ -297,6 +295,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        if (err)
                goto late_failure;
 
+       ip6_set_txhash(sk);
+
        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
                                                             sk->sk_v6_daddr.s6_addr32,
index ac49f84fe2c34026b7af5392df06fc232ea3b9e1..5f983644373a230890b25189865af73f5e2b3b44 100644 (file)
@@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
                case IPPROTO_DCCP:
                        if (!onlyproto && (nh + offset + 4 < skb->data ||
                             pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
-                               __be16 *ports = (__be16 *)exthdr;
+                               __be16 *ports;
 
+                               nh = skb_network_header(skb);
+                               ports = (__be16 *)(nh + offset);
                                fl6->fl6_sport = ports[!!reverse];
                                fl6->fl6_dport = ports[!reverse];
                        }
@@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 
                case IPPROTO_ICMPV6:
                        if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
-                               u8 *icmp = (u8 *)exthdr;
+                               u8 *icmp;
 
+                               nh = skb_network_header(skb);
+                               icmp = (u8 *)(nh + offset);
                                fl6->fl6_icmp_type = icmp[0];
                                fl6->fl6_icmp_code = icmp[1];
                        }
@@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
                case IPPROTO_MH:
                        if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
                                struct ip6_mh *mh;
-                               mh = (struct ip6_mh *)exthdr;
 
+                               nh = skb_network_header(skb);
+                               mh = (struct ip6_mh *)(nh + offset);
                                fl6->fl6_mh_type = mh->ip6mh_type;
                        }
                        fl6->flowi6_proto = nexthdr;
index fb6a1502b6dfa64980305c3bba380408ef3dde14..343da1e35025199bbef7d6680e0770fbdc3eadd2 100644 (file)
@@ -3458,7 +3458,7 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
        rcu_read_lock();
        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
        if (chanctx_conf) {
-               *chandef = chanctx_conf->def;
+               *chandef = sdata->vif.bss_conf.chandef;
                ret = 0;
        } else if (local->open_count > 0 &&
                   local->open_count == local->monitors &&
index 8fdadfd94ba8576ae8bc0ee2e99e8c656c2f6a5b..6081329784dd4475ae95317dde31910efd760525 100644 (file)
@@ -448,7 +448,7 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif,
         */
        if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) {
                u32 basic_rates = vif->bss_conf.basic_rates;
-               s8 baserate = basic_rates ? ffs(basic_rates - 1) : 0;
+               s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0;
 
                rate = &sband->bitrates[rates[0].idx];
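The fix above moves a misplaced parenthesis: ffs() is 1-based, so the lowest basic-rate index is ffs(basic_rates) - 1, not ffs(basic_rates - 1). A quick worked example with an arbitrary bitmap:

	/* basic_rates = 0x10  ->  only rate index 4 is basic
	 *   fixed: ffs(0x10) - 1 = 5 - 1 = 4    (correct index)
	 *   old:   ffs(0x10 - 1) = ffs(0xf) = 1 (wrong index)
	 */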
 
index edde723f9f009e33388f5a6f89129f445fae37a2..2acab1bcaa4b377012a31c6354ceeb61fadbb171 100644 (file)
@@ -62,14 +62,14 @@ minstrel_stats_open(struct inode *inode, struct file *file)
        unsigned int i, tp, prob, eprob;
        char *p;
 
-       ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL);
+       ms = kmalloc(2048, GFP_KERNEL);
        if (!ms)
                return -ENOMEM;
 
        file->private_data = ms;
        p = ms->buf;
-       p += sprintf(p, "rate      throughput  ewma prob  this prob  "
-                       "this succ/attempt   success    attempts\n");
+       p += sprintf(p, "rate          tpt eprob *prob"
+                       "  *ok(*cum)        ok(      cum)\n");
        for (i = 0; i < mi->n_rates; i++) {
                struct minstrel_rate *mr = &mi->r[i];
                struct minstrel_rate_stats *mrs = &mi->r[i].stats;
@@ -86,8 +86,8 @@ minstrel_stats_open(struct inode *inode, struct file *file)
                prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
                eprob = MINSTREL_TRUNC(mrs->probability * 1000);
 
-               p += sprintf(p, "  %6u.%1u   %6u.%1u   %6u.%1u        "
-                               "   %3u(%3u)  %8llu    %8llu\n",
+               p += sprintf(p, " %4u.%1u %3u.%1u %3u.%1u"
+                               " %4u(%4u) %9llu(%9llu)\n",
                                tp / 10, tp % 10,
                                eprob / 10, eprob % 10,
                                prob / 10, prob % 10,
@@ -102,6 +102,8 @@ minstrel_stats_open(struct inode *inode, struct file *file)
                        mi->sample_packets);
        ms->len = p - ms->buf;
 
+       WARN_ON(ms->len + sizeof(*ms) > 2048);
+
        return 0;
 }
 
index a72ad46f2a04b6e557044d52da1f033c7be2c531..d537bec9375463eb115b0f26260f8f3d03759596 100644 (file)
@@ -63,8 +63,8 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
                prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
                eprob = MINSTREL_TRUNC(mr->probability * 1000);
 
-               p += sprintf(p, "      %6u.%1u   %6u.%1u    %6u.%1u    "
-                               "%3u            %3u(%3u)  %8llu    %8llu\n",
+               p += sprintf(p, " %4u.%1u %3u.%1u %3u.%1u "
+                               "%3u %4u(%4u) %9llu(%9llu)\n",
                                tp / 10, tp % 10,
                                eprob / 10, eprob % 10,
                                prob / 10, prob % 10,
@@ -96,14 +96,15 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
                return ret;
        }
 
-       ms = kmalloc(sizeof(*ms) + 8192, GFP_KERNEL);
+       ms = kmalloc(8192, GFP_KERNEL);
        if (!ms)
                return -ENOMEM;
 
        file->private_data = ms;
        p = ms->buf;
-       p += sprintf(p, "type           rate     throughput  ewma prob   "
-                    "this prob  retry   this succ/attempt   success    attempts\n");
+       p += sprintf(p, "type           rate     tpt eprob *prob "
+                       "ret  *ok(*cum)        ok(      cum)\n");
+
 
        p = minstrel_ht_stats_dump(mi, max_mcs, p);
        for (i = 0; i < max_mcs; i++)
@@ -118,6 +119,8 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
                MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10);
        ms->len = p - ms->buf;
 
+       WARN_ON(ms->len + sizeof(*ms) > 8192);
+
        return nonseekable_open(inode, file);
 }
 
index 42f68cb8957e5a29d1a4e7965b65aff086bfe55e..bcda2ac7d84402f83d03f24390f22f2f32ea701e 100644 (file)
@@ -336,6 +336,7 @@ struct ieee80211_tx_latency_stat {
  * @known_smps_mode: the smps_mode the client thinks we are in. Relevant for
  *     AP only.
  * @cipher_scheme: optional cipher scheme for this station
+ * @last_tdls_pkt_time: holds the time in jiffies of last TDLS pkt ACKed
  */
 struct sta_info {
        /* General information, mostly static */
index 0a3c171be5374d2b813a18a368a40053e619cbef..6dec088c2d0f77dff06bf6bbd8557d2f325bafa1 100644 (file)
@@ -1,4 +1,4 @@
 #
 # Makefile for MPLS.
 #
-obj-y += mpls_gso.o
+obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o
index e28ed2ef5b0650faf5f3f0db950c319f4caa7c31..e3545f21a099b01b931cd863205a847acd336238 100644 (file)
@@ -48,7 +48,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
        __skb_push(skb, skb->mac_len);
 
        /* Segment inner packet. */
-       mpls_features = skb->dev->mpls_features & netif_skb_features(skb);
+       mpls_features = skb->dev->mpls_features & features;
        segs = skb_mac_gso_segment(skb, mpls_features);
 
 
@@ -59,8 +59,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
         * above pulled.  It will be re-pushed after returning
         * skb_mac_gso_segment(), an indirect caller of this function.
         */
-       __skb_push(skb, skb->data - skb_mac_header(skb));
-
+       __skb_pull(skb, skb->data - skb_mac_header(skb));
 out:
        return segs;
 }
index 912e5a05b79dbdc2de8aaa3729e08af01888dc4e..86f9d76b1464b1b0d5e8b5fc9f9c0c129713bd57 100644 (file)
@@ -659,7 +659,7 @@ ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
        struct ip_set *set;
        struct ip_set_net *inst = ip_set_pernet(net);
 
-       if (index > inst->ip_set_max)
+       if (index >= inst->ip_set_max)
                return IPSET_INVALID_ID;
 
        nfnl_lock(NFNL_SUBSYS_IPSET);
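This is an off-by-one guard: set IDs index an array of inst->ip_set_max entries, so the valid range is 0 .. ip_set_max - 1 and index == ip_set_max must be rejected as well. With an illustrative maximum:

	/* ip_set_max = 8  ->  valid ids are 0..7;
	 * the old "index > 8" test let index == 8 through,
	 * reading one slot past the end of the set array.
	 */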
index 91f17c1eb8a20c0226e01c68e255acf118a6a4d0..437a3663ad0346e13cfbc0017be20b4bad73c218 100644 (file)
@@ -316,7 +316,7 @@ __ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
        if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
                                                  local))) {
                IP_VS_DBG_RL("We are crossing local and non-local addresses"
-                            " daddr=%pI4\n", &dest->addr.ip);
+                            " daddr=%pI4\n", &daddr);
                goto err_put;
        }
 
@@ -458,7 +458,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
        if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
                                                  local))) {
                IP_VS_DBG_RL("We are crossing local and non-local addresses"
-                            " daddr=%pI6\n", &dest->addr.in6);
+                            " daddr=%pI6\n", daddr);
                goto err_put;
        }
 
index 44d1ea32570a07338dc39f34624bd823b6f76916..d87b6423ffb21e0f8f9b6ef25ef51c1cb5f54ad6 100644 (file)
@@ -213,7 +213,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
        {
 /* REPLY */
 /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
-/*syn*/           { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sS2 },
+/*syn*/           { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
 /*
  *     sNO -> sIV      Never reached.
  *     sSS -> sS2      Simultaneous open
@@ -223,7 +223,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
  *     sFW -> sIV
  *     sCW -> sIV
  *     sLA -> sIV
- *     sTW -> sIV      Reopened connection, but server may not do it.
+ *     sTW -> sSS      Reopened connection, but server may have switched role
  *     sCL -> sIV
  */
 /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
index 556a0dfa4abc07f7ed7bde511e07db1a85c99ecd..11ab4b078f3bb68323b1f050f3a1d5e83d271f36 100644 (file)
@@ -1328,10 +1328,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                        basechain->stats = stats;
                } else {
                        stats = netdev_alloc_pcpu_stats(struct nft_stats);
-                       if (IS_ERR(stats)) {
+                       if (stats == NULL) {
                                module_put(type->owner);
                                kfree(basechain);
-                               return PTR_ERR(stats);
+                               return -ENOMEM;
                        }
                        rcu_assign_pointer(basechain->stats, stats);
                }
@@ -3744,6 +3744,20 @@ static const struct nfnetlink_subsystem nf_tables_subsys = {
        .abort          = nf_tables_abort,
 };
 
+int nft_chain_validate_dependency(const struct nft_chain *chain,
+                                 enum nft_chain_type type)
+{
+       const struct nft_base_chain *basechain;
+
+       if (chain->flags & NFT_BASE_CHAIN) {
+               basechain = nft_base_chain(chain);
+               if (basechain->type->type != type)
+                       return -EOPNOTSUPP;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_chain_validate_dependency);
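The helper only enforces the dependency for base chains; expressions attached to non-base chains pass through. Callers elsewhere in this series (the masquerade and NAT expressions further down) use it as a one-line guard:

	err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
	if (err < 0)
		return err;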
+
 /*
  * Loop detection - walk through the ruleset beginning at the destination chain
  * of a new jump until either the source chain is reached (loop) or all
index b1e3a05794169283ed50d1c0fb4f44d9e7753eeb..5f1be5ba35595abb3e96a4d7c9e16fe2c8f4bd8c 100644 (file)
@@ -43,7 +43,8 @@
 #define NFULNL_NLBUFSIZ_DEFAULT        NLMSG_GOODSIZE
 #define NFULNL_TIMEOUT_DEFAULT         100     /* every second */
 #define NFULNL_QTHRESH_DEFAULT         100     /* 100 packets */
-#define NFULNL_COPY_RANGE_MAX  0xFFFF  /* max packet size is limited by 16-bit struct nfattr nfa_len field */
+/* max packet size is limited by 16-bit struct nfattr nfa_len field */
+#define NFULNL_COPY_RANGE_MAX  (0xFFFF - NLA_HDRLEN)
 
 #define PRINTR(x, args...)     do { if (net_ratelimit()) \
                                     printk(x, ## args); } while (0);
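The new cap accounts for the attribute header as well: the 16-bit nfattr length field covers both the header and the payload, so the largest payload that still fits is

	NFULNL_COPY_RANGE_MAX = 0xFFFF - NLA_HDRLEN = 65535 - 4 = 65531 bytes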
@@ -252,6 +253,8 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
 
        case NFULNL_COPY_PACKET:
                inst->copy_mode = mode;
+               if (range == 0)
+                       range = NFULNL_COPY_RANGE_MAX;
                inst->copy_range = min_t(unsigned int,
                                         range, NFULNL_COPY_RANGE_MAX);
                break;
@@ -343,26 +346,25 @@ nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size,
        return skb;
 }
 
-static int
+static void
 __nfulnl_send(struct nfulnl_instance *inst)
 {
-       int status = -1;
-
        if (inst->qlen > 1) {
                struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
                                                 NLMSG_DONE,
                                                 sizeof(struct nfgenmsg),
                                                 0);
-               if (!nlh)
+               if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n",
+                             inst->skb->len, skb_tailroom(inst->skb))) {
+                       kfree_skb(inst->skb);
                        goto out;
+               }
        }
-       status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
-                                  MSG_DONTWAIT);
-
+       nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
+                         MSG_DONTWAIT);
+out:
        inst->qlen = 0;
        inst->skb = NULL;
-out:
-       return status;
 }
 
 static void
@@ -649,7 +651,8 @@ nfulnl_log_packet(struct net *net,
                + nla_total_size(sizeof(u_int32_t))     /* gid */
                + nla_total_size(plen)                  /* prefix */
                + nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
-               + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));
+               + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
+               + nla_total_size(sizeof(struct nfgenmsg));      /* NLMSG_DONE */
 
        if (in && skb_mac_header_was_set(skb)) {
                size +=   nla_total_size(skb->dev->hard_header_len)
@@ -678,8 +681,7 @@ nfulnl_log_packet(struct net *net,
                break;
 
        case NFULNL_COPY_PACKET:
-               if (inst->copy_range == 0
-                   || inst->copy_range > skb->len)
+               if (inst->copy_range > skb->len)
                        data_len = skb->len;
                else
                        data_len = inst->copy_range;
@@ -692,8 +694,7 @@ nfulnl_log_packet(struct net *net,
                goto unlock_and_release;
        }
 
-       if (inst->skb &&
-           size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
+       if (inst->skb && size > skb_tailroom(inst->skb)) {
                /* either the queue len is too high or we don't have
                 * enough room in the skb left. flush to userspace. */
                __nfulnl_flush(inst);
index a82077d9f59b2f49cf755bcc9c8754846cb64b3e..7c60ccd61a3e1685875adc9f2d2391e4957399c8 100644 (file)
@@ -665,7 +665,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
         * returned by nf_queue.  For instance, callers rely on -ECANCELED to
         * mean 'ignore this hook'.
         */
-       if (IS_ERR(segs))
+       if (IS_ERR_OR_NULL(segs))
                goto out_err;
        queued = 0;
        err = 0;
index 7e2683c8a44a9d778ab39c212f7204c28770091c..9d6d6f60a80fc6b23da9bb140c90e085a4fa675a 100644 (file)
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
-#include <asm/uaccess.h> /* for set_fs */
 #include <net/netfilter/nf_tables.h>
 
+static const struct {
+       const char      *name;
+       u8              type;
+} table_to_chaintype[] = {
+       { "filter",     NFT_CHAIN_T_DEFAULT },
+       { "raw",        NFT_CHAIN_T_DEFAULT },
+       { "security",   NFT_CHAIN_T_DEFAULT },
+       { "mangle",     NFT_CHAIN_T_ROUTE },
+       { "nat",        NFT_CHAIN_T_NAT },
+       { },
+};
+
+static int nft_compat_table_to_chaintype(const char *table)
+{
+       int i;
+
+       for (i = 0; table_to_chaintype[i].name != NULL; i++) {
+               if (strcmp(table_to_chaintype[i].name, table) == 0)
+                       return table_to_chaintype[i].type;
+       }
+
+       return -1;
+}
+
+static int nft_compat_chain_validate_dependency(const char *tablename,
+                                               const struct nft_chain *chain)
+{
+       enum nft_chain_type type;
+       const struct nft_base_chain *basechain;
+
+       if (!tablename || !(chain->flags & NFT_BASE_CHAIN))
+               return 0;
+
+       type = nft_compat_table_to_chaintype(tablename);
+       if (type < 0)
+               return -EINVAL;
+
+       basechain = nft_base_chain(chain);
+       if (basechain->type->type != type)
+               return -EINVAL;
+
+       return 0;
+}
+
 union nft_entry {
        struct ipt_entry e4;
        struct ip6t_entry e6;
@@ -95,6 +138,8 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
                const struct nf_hook_ops *ops = &basechain->ops[0];
 
                par->hook_mask = 1 << ops->hooknum;
+       } else {
+               par->hook_mask = 0;
        }
        par->family     = ctx->afi->family;
 }
@@ -151,6 +196,10 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        union nft_entry e = {};
        int ret;
 
+       ret = nft_compat_chain_validate_dependency(target->table, ctx->chain);
+       if (ret < 0)
+               goto err;
+
        target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info);
 
        if (ctx->nla[NFTA_RULE_COMPAT]) {
@@ -216,6 +265,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
 {
        struct xt_target *target = expr->ops->data;
        unsigned int hook_mask = 0;
+       int ret;
 
        if (ctx->chain->flags & NFT_BASE_CHAIN) {
                const struct nft_base_chain *basechain =
@@ -223,11 +273,13 @@ static int nft_target_validate(const struct nft_ctx *ctx,
                const struct nf_hook_ops *ops = &basechain->ops[0];
 
                hook_mask = 1 << ops->hooknum;
-               if (hook_mask & target->hooks)
-                       return 0;
+               if (!(hook_mask & target->hooks))
+                       return -EINVAL;
 
-               /* This target is being called from an invalid chain */
-               return -EINVAL;
+               ret = nft_compat_chain_validate_dependency(target->table,
+                                                          ctx->chain);
+               if (ret < 0)
+                       return ret;
        }
        return 0;
 }
@@ -293,6 +345,8 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
                const struct nf_hook_ops *ops = &basechain->ops[0];
 
                par->hook_mask = 1 << ops->hooknum;
+       } else {
+               par->hook_mask = 0;
        }
        par->family     = ctx->afi->family;
 }
@@ -320,6 +374,10 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        union nft_entry e = {};
        int ret;
 
+       ret = nft_compat_chain_validate_dependency(match->name, ctx->chain);
+       if (ret < 0)
+               goto err;
+
        match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info);
 
        if (ctx->nla[NFTA_RULE_COMPAT]) {
@@ -379,6 +437,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
 {
        struct xt_match *match = expr->ops->data;
        unsigned int hook_mask = 0;
+       int ret;
 
        if (ctx->chain->flags & NFT_BASE_CHAIN) {
                const struct nft_base_chain *basechain =
@@ -386,11 +445,13 @@ static int nft_match_validate(const struct nft_ctx *ctx,
                const struct nf_hook_ops *ops = &basechain->ops[0];
 
                hook_mask = 1 << ops->hooknum;
-               if (hook_mask & match->hooks)
-                       return 0;
+               if (!(hook_mask & match->hooks))
+                       return -EINVAL;
 
-               /* This match is being called from an invalid chain */
-               return -EINVAL;
+               ret = nft_compat_chain_validate_dependency(match->name,
+                                                          ctx->chain);
+               if (ret < 0)
+                       return ret;
        }
        return 0;
 }
@@ -611,7 +672,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        family = ctx->afi->family;
 
        /* Re-use the existing target if it's already loaded. */
-       list_for_each_entry(nft_target, &nft_match_list, head) {
+       list_for_each_entry(nft_target, &nft_target_list, head) {
                struct xt_target *target = nft_target->ops.data;
 
                if (strcmp(target->name, tg_name) == 0 &&
index 6637bab0056705d6cf9671f681b65b1c5166523a..d1ffd5eb3a9b5b86495b53adb5b8bc27c17921df 100644 (file)
@@ -26,6 +26,11 @@ int nft_masq_init(const struct nft_ctx *ctx,
                  const struct nlattr * const tb[])
 {
        struct nft_masq *priv = nft_expr_priv(expr);
+       int err;
+
+       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+       if (err < 0)
+               return err;
 
        if (tb[NFTA_MASQ_FLAGS] == NULL)
                return 0;
@@ -55,5 +60,12 @@ nla_put_failure:
 }
 EXPORT_SYMBOL_GPL(nft_masq_dump);
 
+int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                     const struct nft_data **data)
+{
+       return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+}
+EXPORT_SYMBOL_GPL(nft_masq_validate);
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
index 799550b476fbdeeadc2745aaff97fe5f2b197761..afe2b0b45ec41f82df6f2430958a4392aa5eb608 100644 (file)
@@ -95,7 +95,13 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        u32 family;
        int err;
 
-       if (tb[NFTA_NAT_TYPE] == NULL)
+       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+       if (err < 0)
+               return err;
+
+       if (tb[NFTA_NAT_TYPE] == NULL ||
+           (tb[NFTA_NAT_REG_ADDR_MIN] == NULL &&
+            tb[NFTA_NAT_REG_PROTO_MIN] == NULL))
                return -EINVAL;
 
        switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) {
@@ -120,38 +126,44 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        priv->family = family;
 
        if (tb[NFTA_NAT_REG_ADDR_MIN]) {
-               priv->sreg_addr_min = ntohl(nla_get_be32(
-                                               tb[NFTA_NAT_REG_ADDR_MIN]));
+               priv->sreg_addr_min =
+                       ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MIN]));
+
                err = nft_validate_input_register(priv->sreg_addr_min);
                if (err < 0)
                        return err;
-       }
 
-       if (tb[NFTA_NAT_REG_ADDR_MAX]) {
-               priv->sreg_addr_max = ntohl(nla_get_be32(
-                                               tb[NFTA_NAT_REG_ADDR_MAX]));
-               err = nft_validate_input_register(priv->sreg_addr_max);
-               if (err < 0)
-                       return err;
-       } else
-               priv->sreg_addr_max = priv->sreg_addr_min;
+               if (tb[NFTA_NAT_REG_ADDR_MAX]) {
+                       priv->sreg_addr_max =
+                               ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MAX]));
+
+                       err = nft_validate_input_register(priv->sreg_addr_max);
+                       if (err < 0)
+                               return err;
+               } else {
+                       priv->sreg_addr_max = priv->sreg_addr_min;
+               }
+       }
 
        if (tb[NFTA_NAT_REG_PROTO_MIN]) {
-               priv->sreg_proto_min = ntohl(nla_get_be32(
-                                               tb[NFTA_NAT_REG_PROTO_MIN]));
+               priv->sreg_proto_min =
+                       ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MIN]));
+
                err = nft_validate_input_register(priv->sreg_proto_min);
                if (err < 0)
                        return err;
-       }
 
-       if (tb[NFTA_NAT_REG_PROTO_MAX]) {
-               priv->sreg_proto_max = ntohl(nla_get_be32(
-                                               tb[NFTA_NAT_REG_PROTO_MAX]));
-               err = nft_validate_input_register(priv->sreg_proto_max);
-               if (err < 0)
-                       return err;
-       } else
-               priv->sreg_proto_max = priv->sreg_proto_min;
+               if (tb[NFTA_NAT_REG_PROTO_MAX]) {
+                       priv->sreg_proto_max =
+                               ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MAX]));
+
+                       err = nft_validate_input_register(priv->sreg_proto_max);
+                       if (err < 0)
+                               return err;
+               } else {
+                       priv->sreg_proto_max = priv->sreg_proto_min;
+               }
+       }
 
        if (tb[NFTA_NAT_FLAGS]) {
                priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS]));
@@ -179,17 +191,19 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
 
        if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family)))
                goto nla_put_failure;
-       if (nla_put_be32(skb,
-                        NFTA_NAT_REG_ADDR_MIN, htonl(priv->sreg_addr_min)))
-               goto nla_put_failure;
-       if (nla_put_be32(skb,
-                        NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max)))
-               goto nla_put_failure;
+
+       if (priv->sreg_addr_min) {
+               if (nla_put_be32(skb, NFTA_NAT_REG_ADDR_MIN,
+                                htonl(priv->sreg_addr_min)) ||
+                   nla_put_be32(skb, NFTA_NAT_REG_ADDR_MAX,
+                                htonl(priv->sreg_addr_max)))
+                       goto nla_put_failure;
+       }
+
        if (priv->sreg_proto_min) {
                if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN,
-                                htonl(priv->sreg_proto_min)))
-                       goto nla_put_failure;
-               if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX,
+                                htonl(priv->sreg_proto_min)) ||
+                   nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX,
                                 htonl(priv->sreg_proto_max)))
                        goto nla_put_failure;
        }
@@ -205,6 +219,13 @@ nla_put_failure:
        return -1;
 }
 
+static int nft_nat_validate(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr,
+                           const struct nft_data **data)
+{
+       return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+}
+
 static struct nft_expr_type nft_nat_type;
 static const struct nft_expr_ops nft_nat_ops = {
        .type           = &nft_nat_type,
@@ -212,6 +233,7 @@ static const struct nft_expr_ops nft_nat_ops = {
        .eval           = nft_nat_eval,
        .init           = nft_nat_init,
        .dump           = nft_nat_dump,
+       .validate       = nft_nat_validate,
 };
 
 static struct nft_expr_type nft_nat_type __read_mostly = {
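Aside: the masquerade and NAT hunks above gate both their init and validate paths on the same question -- is this expression placed in (or reachable from) a NAT base chain? The body of nft_chain_validate_dependency() is not part of this diff; the sketch below is only an assumption about the shape such a check takes for a base chain, under a made-up name, not a copy of the kernel's implementation.

/*
 * Illustrative sketch only (assumes <net/netfilter/nf_tables.h> and
 * <linux/errno.h>); example_chain_requires_type() is a hypothetical name
 * and this is not taken from the kernel source.
 */
static int example_chain_requires_type(const struct nft_chain *chain,
				       enum nft_chain_type type)
{
	const struct nft_base_chain *basechain;

	/* Non-base chains cannot be decided here; they are covered when the
	 * ruleset is validated against the base chain they hang off.
	 */
	if (!(chain->flags & NFT_BASE_CHAIN))
		return 0;

	basechain = nft_base_chain(chain);
	if (basechain->type->type != type)
		return -EOPNOTSUPP;	/* e.g. masq/nat outside a NAT chain */

	return 0;
}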
index 7a186e74b1b3533b936e0868ff49187321215386..f1de72de273e20e7422bf6de42ebbca712affacf 100644 (file)
@@ -96,6 +96,14 @@ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
 static int netlink_dump(struct sock *sk);
 static void netlink_skb_destructor(struct sk_buff *skb);
 
+/* nl_table locking explained:
+ * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock
+ * combined with an RCU read-side lock. Insertion and removal are protected
+ * with nl_sk_hash_lock while using RCU list modification primitives and may
+ * run in parallel to nl_table_lock protected lookups. Destruction of the
+ * Netlink socket may only occur *after* nl_table_lock has been acquired
+ * either during or after the socket has been removed from the list.
+ */
 DEFINE_RWLOCK(nl_table_lock);
 EXPORT_SYMBOL_GPL(nl_table_lock);
 static atomic_t nl_table_users = ATOMIC_INIT(0);
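Aside: the locking comment added above describes two reader schemes (nl_table_lock or nl_sk_hash_lock, each combined with RCU) coexisting with writers that take the hash lock and use RCU-aware list primitives. Below is a generic, self-contained sketch of that arrangement; demo_lock, demo_hash_lock, demo_list and struct demo_entry are placeholder names, not netlink code.

/* Placeholder example of "rwlock/RCU readers vs. lock-protected writers". */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_lock);	/* taken read-side by lookups	*/
static DEFINE_SPINLOCK(demo_hash_lock);	/* serializes insert/remove	*/
static LIST_HEAD(demo_list);

struct demo_entry {
	struct list_head node;
	int key;
};

static struct demo_entry *demo_lookup(int key)
{
	struct demo_entry *e, *found = NULL;

	read_lock(&demo_lock);		/* blocks teardown's write_lock */
	rcu_read_lock();		/* keeps entries alive while walking */
	list_for_each_entry_rcu(e, &demo_list, node) {
		if (e->key == key) {
			found = e;
			break;
		}
	}
	rcu_read_unlock();
	read_unlock(&demo_lock);
	return found;
}

static void demo_insert(struct demo_entry *e)
{
	spin_lock(&demo_hash_lock);
	list_add_rcu(&e->node, &demo_list); /* safe vs. concurrent walkers */
	spin_unlock(&demo_hash_lock);
}

static void demo_remove(struct demo_entry *e)
{
	spin_lock(&demo_hash_lock);
	list_del_rcu(&e->node);
	spin_unlock(&demo_hash_lock);

	/* Mirror the comment above: wait out rwlock readers, then RCU
	 * readers, before the entry may be freed.
	 */
	write_lock(&demo_lock);
	write_unlock(&demo_lock);
	synchronize_rcu();
	kfree(e);
}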
@@ -109,10 +117,10 @@ EXPORT_SYMBOL_GPL(nl_sk_hash_lock);
 static int lockdep_nl_sk_hash_is_held(void)
 {
 #ifdef CONFIG_LOCKDEP
-       return (debug_locks) ? lockdep_is_held(&nl_sk_hash_lock) : 1;
-#else
-       return 1;
+       if (debug_locks)
+               return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);
 #endif
+       return 1;
 }
 
 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
@@ -1028,11 +1036,13 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
        struct netlink_table *table = &nl_table[protocol];
        struct sock *sk;
 
+       read_lock(&nl_table_lock);
        rcu_read_lock();
        sk = __netlink_lookup(table, portid, net);
        if (sk)
                sock_hold(sk);
        rcu_read_unlock();
+       read_unlock(&nl_table_lock);
 
        return sk;
 }
@@ -1257,9 +1267,6 @@ static int netlink_release(struct socket *sock)
        }
        netlink_table_ungrab();
 
-       /* Wait for readers to complete */
-       synchronize_net();
-
        kfree(nlk->groups);
        nlk->groups = NULL;
 
@@ -1281,6 +1288,7 @@ static int netlink_autobind(struct socket *sock)
 
 retry:
        cond_resched();
+       netlink_table_grab();
        rcu_read_lock();
        if (__netlink_lookup(table, portid, net)) {
                /* Bind collision, search negative portid values. */
@@ -1288,9 +1296,11 @@ retry:
                if (rover > -4097)
                        rover = -4097;
                rcu_read_unlock();
+               netlink_table_ungrab();
                goto retry;
        }
        rcu_read_unlock();
+       netlink_table_ungrab();
 
        err = netlink_insert(sk, net, portid);
        if (err == -EADDRINUSE)
@@ -2921,14 +2931,16 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
 }
 
 static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(RCU)
+       __acquires(nl_table_lock) __acquires(RCU)
 {
+       read_lock(&nl_table_lock);
        rcu_read_lock();
        return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
 }
 
 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
+       struct rhashtable *ht;
        struct netlink_sock *nlk;
        struct nl_seq_iter *iter;
        struct net *net;
@@ -2943,19 +2955,19 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        iter = seq->private;
        nlk = v;
 
-       rht_for_each_entry_rcu(nlk, nlk->node.next, node)
+       i = iter->link;
+       ht = &nl_table[i].hash;
+       rht_for_each_entry(nlk, nlk->node.next, ht, node)
                if (net_eq(sock_net((struct sock *)nlk), net))
                        return nlk;
 
-       i = iter->link;
        j = iter->hash_idx + 1;
 
        do {
-               struct rhashtable *ht = &nl_table[i].hash;
                const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
 
                for (; j < tbl->size; j++) {
-                       rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) {
+                       rht_for_each_entry(nlk, tbl->buckets[j], ht, node) {
                                if (net_eq(sock_net((struct sock *)nlk), net)) {
                                        iter->link = i;
                                        iter->hash_idx = j;
@@ -2971,9 +2983,10 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void netlink_seq_stop(struct seq_file *seq, void *v)
-       __releases(RCU)
+       __releases(RCU) __releases(nl_table_lock)
 {
        rcu_read_unlock();
+       read_unlock(&nl_table_lock);
 }
 
 
index 2e31d9e7f4dc4ca1c002b3c488b838236dcdf2de..e6d7255183eba31267ec29705441a019681ca617 100644 (file)
@@ -324,6 +324,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
        segs = __skb_gso_segment(skb, NETIF_F_SG, false);
        if (IS_ERR(segs))
                return PTR_ERR(segs);
+       if (segs == NULL)
+               return -EINVAL;
 
        /* Queue all of the segments. */
        skb = segs;
index 2cf61b3e633c25bb507860e970935998b05bf10d..76f402e05bd6f7a5ea50653629855039e6940d71 100644 (file)
@@ -947,7 +947,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
        if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
                if (qdisc_is_percpu_stats(sch)) {
                        sch->cpu_bstats =
-                               alloc_percpu(struct gnet_stats_basic_cpu);
+                               netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
                        if (!sch->cpu_bstats)
                                goto err_out4;
 
index 33d7a98a7a9799b3cb5b3ce62c9013c49483184c..b783a446d884d85af054c9a139f91a63a162bc9f 100644 (file)
@@ -445,7 +445,6 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt)
        sch->limit = q->params.limit;
 
        setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch);
-       mod_timer(&q->adapt_timer, jiffies + HZ / 2);
 
        if (opt) {
                int err = pie_change(sch, opt);
@@ -454,6 +453,7 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt)
                        return err;
        }
 
+       mod_timer(&q->adapt_timer, jiffies + HZ / 2);
        return 0;
 }
 
index 90cee4a6fce49450a0f83cca53fcf663ebea8148..5781634e957d7be47c2e572783f3af2a3db9bf91 100644 (file)
@@ -219,11 +219,11 @@ void tipc_node_abort_sock_conns(struct list_head *conns)
 void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
        struct tipc_link **active = &n_ptr->active_links[0];
-       u32 addr = n_ptr->addr;
 
        n_ptr->working_links++;
-       tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE,
-                            l_ptr->bearer_id, addr);
+       n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
+       n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
+
        pr_info("Established link <%s> on network plane %c\n",
                l_ptr->name, l_ptr->net_plane);
 
@@ -284,10 +284,10 @@ static void node_select_active_links(struct tipc_node *n_ptr)
 void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
        struct tipc_link **active;
-       u32 addr = n_ptr->addr;
 
        n_ptr->working_links--;
-       tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr);
+       n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN;
+       n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
 
        if (!tipc_link_is_active(l_ptr)) {
                pr_info("Lost standby link <%s> on network plane %c\n",
@@ -552,28 +552,30 @@ void tipc_node_unlock(struct tipc_node *node)
        LIST_HEAD(conn_sks);
        struct sk_buff_head waiting_sks;
        u32 addr = 0;
-       unsigned int flags = node->action_flags;
+       int flags = node->action_flags;
+       u32 link_id = 0;
 
-       if (likely(!node->action_flags)) {
+       if (likely(!flags)) {
                spin_unlock_bh(&node->lock);
                return;
        }
 
+       addr = node->addr;
+       link_id = node->link_id;
        __skb_queue_head_init(&waiting_sks);
-       if (node->action_flags & TIPC_WAKEUP_USERS) {
+
+       if (flags & TIPC_WAKEUP_USERS)
                skb_queue_splice_init(&node->waiting_sks, &waiting_sks);
-               node->action_flags &= ~TIPC_WAKEUP_USERS;
-       }
-       if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) {
+
+       if (flags & TIPC_NOTIFY_NODE_DOWN) {
                list_replace_init(&node->nsub, &nsub_list);
                list_replace_init(&node->conn_sks, &conn_sks);
-               node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
        }
-       if (node->action_flags & TIPC_NOTIFY_NODE_UP) {
-               node->action_flags &= ~TIPC_NOTIFY_NODE_UP;
-               addr = node->addr;
-       }
-       node->action_flags &= ~TIPC_WAKEUP_BCAST_USERS;
+       node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN |
+                               TIPC_NOTIFY_NODE_UP | TIPC_NOTIFY_LINK_UP |
+                               TIPC_NOTIFY_LINK_DOWN |
+                               TIPC_WAKEUP_BCAST_USERS);
+
        spin_unlock_bh(&node->lock);
 
        while (!skb_queue_empty(&waiting_sks))
@@ -588,6 +590,14 @@ void tipc_node_unlock(struct tipc_node *node)
        if (flags & TIPC_WAKEUP_BCAST_USERS)
                tipc_bclink_wakeup_users();
 
-       if (addr)
+       if (flags & TIPC_NOTIFY_NODE_UP)
                tipc_named_node_up(addr);
+
+       if (flags & TIPC_NOTIFY_LINK_UP)
+               tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr,
+                                    TIPC_NODE_SCOPE, link_id, addr);
+
+       if (flags & TIPC_NOTIFY_LINK_DOWN)
+               tipc_nametbl_withdraw(TIPC_LINK_STATE, addr,
+                                     link_id, addr);
 }
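Aside: the tipc_node_unlock() rework above is an instance of a general pattern -- snapshot the pending-notification flags and the data they need while the spinlock is held, clear the flags, drop the lock, and only then call out to code that may take other locks. A stripped-down sketch with placeholder names (struct demo_node, EVENT_UP/EVENT_DOWN and the report_*() stubs are not TIPC symbols):

/* Placeholder example of "snapshot under lock, notify after unlock". */
#include <linux/spinlock.h>
#include <linux/types.h>

#define EVENT_UP	(1 << 0)
#define EVENT_DOWN	(1 << 1)

struct demo_node {
	spinlock_t lock;
	int pending;		/* which notifications are owed */
	u32 state;		/* data those notifications will need */
};

static void report_up(u32 state)   { /* stands in for a publish call */ }
static void report_down(u32 state) { /* stands in for a withdraw call */ }

static void demo_unlock_and_notify(struct demo_node *n)
{
	int pending;
	u32 state;

	spin_lock_bh(&n->lock);
	pending = n->pending;	/* snapshot the flags ...		*/
	state   = n->state;	/* ... and the data they refer to	*/
	n->pending = 0;		/* clear before releasing the lock	*/
	spin_unlock_bh(&n->lock);

	if (pending & EVENT_UP)	/* no spinlock held: safe to call out	*/
		report_up(state);
	if (pending & EVENT_DOWN)
		report_down(state);
}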
index 67513c3c852c41f2d2a89ffc4d1eb22b7e77de91..04e91458bb29a17916964e65fbca8a99a35a7f0d 100644 (file)
@@ -53,6 +53,7 @@
  * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
  * TIPC_NOTIFY_NODE_DOWN: notify node is down
  * TIPC_NOTIFY_NODE_UP: notify node is up
+ * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
  */
 enum {
        TIPC_WAIT_PEER_LINKS_DOWN       = (1 << 1),
@@ -60,7 +61,9 @@ enum {
        TIPC_NOTIFY_NODE_DOWN           = (1 << 3),
        TIPC_NOTIFY_NODE_UP             = (1 << 4),
        TIPC_WAKEUP_USERS               = (1 << 5),
-       TIPC_WAKEUP_BCAST_USERS         = (1 << 6)
+       TIPC_WAKEUP_BCAST_USERS         = (1 << 6),
+       TIPC_NOTIFY_LINK_UP             = (1 << 7),
+       TIPC_NOTIFY_LINK_DOWN           = (1 << 8)
 };
 
 /**
@@ -100,6 +103,7 @@ struct tipc_node_bclink {
  * @working_links: number of working links to node (both active and standby)
  * @link_cnt: number of links to node
  * @signature: node instance identifier
+ * @link_id: local and remote bearer ids of changing link, if any
  * @nsub: list of "node down" subscriptions monitoring node
  * @rcu: rcu struct for tipc_node
  */
@@ -116,6 +120,7 @@ struct tipc_node {
        int link_cnt;
        int working_links;
        u32 signature;
+       u32 link_id;
        struct list_head nsub;
        struct sk_buff_head waiting_sks;
        struct list_head conn_sks;
index 75275c5cf9291a0afe0818b6ef99ccfc14db8822..51bddc236a15582b1a1dd0deb0b1a733c1d40070 100644 (file)
@@ -1776,7 +1776,7 @@ int tipc_sk_rcv(struct sk_buff *buf)
        sk = &tsk->sk;
 
        /* Queue message */
-       bh_lock_sock(sk);
+       spin_lock_bh(&sk->sk_lock.slock);
 
        if (!sock_owned_by_user(sk)) {
                rc = filter_rcv(sk, buf);
@@ -1787,7 +1787,7 @@ int tipc_sk_rcv(struct sk_buff *buf)
                if (sk_add_backlog(sk, buf, limit))
                        rc = -TIPC_ERR_OVERLOAD;
        }
-       bh_unlock_sock(sk);
+       spin_unlock_bh(&sk->sk_lock.slock);
        tipc_sk_put(tsk);
        if (likely(!rc))
                return 0;
@@ -2673,7 +2673,7 @@ static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
        case SIOCGETLINKNAME:
                if (copy_from_user(&lnr, argp, sizeof(lnr)))
                        return -EFAULT;
-               if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer,
+               if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer,
                                            lnr.linkname, TIPC_MAX_LINK_NAME)) {
                        if (copy_to_user(argp, &lnr, sizeof(lnr)))
                                return -EFAULT;
index cb9f5a44ffadf7175d109cadefb091bbfdbb41e4..5839c85075f15407e86d84526cfb29087c70aa81 100644 (file)
@@ -5927,6 +5927,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
        int err;
        bool need_new_beacon = false;
        int len, i;
+       u32 cs_count;
 
        if (!rdev->ops->channel_switch ||
            !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
@@ -5963,7 +5964,14 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
        if (need_new_beacon && !info->attrs[NL80211_ATTR_CSA_IES])
                return -EINVAL;
 
-       params.count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]);
+       /* Even though the attribute is u32, the specification says
+        * u8, so let's make sure we don't overflow.
+        */
+       cs_count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]);
+       if (cs_count > 255)
+               return -EINVAL;
+
+       params.count = cs_count;
 
        if (!need_new_beacon)
                goto skip_beacons;
index 499d6c18a8ce4366a5fe61184eb7416bfb060401..7c532856b39829f0cddf50baecc6d8c73c3422e7 100644 (file)
@@ -157,6 +157,8 @@ static int xfrm_output_gso(struct sk_buff *skb)
        kfree_skb(skb);
        if (IS_ERR(segs))
                return PTR_ERR(segs);
+       if (segs == NULL)
+               return -EINVAL;
 
        do {
                struct sk_buff *nskb = segs->next;
index 4c4e457e788861a1255dc654cc4d4eec80c22fda..88bf289abdc925a05730d6695944bdef1ec67eaf 100644 (file)
@@ -1962,7 +1962,7 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
        struct xfrm_policy *pol = xdst->pols[0];
        struct xfrm_policy_queue *pq = &pol->polq;
 
-       if (unlikely(skb_fclone_busy(skb))) {
+       if (unlikely(skb_fclone_busy(sk, skb))) {
                kfree_skb(skb);
                return 0;
        }
index f44ef11f65a787a26fe99791b24b2b2606321a0e..eb4bec0ad8afd2da3c8a52c61f1937425b082665 100644 (file)
@@ -208,6 +208,17 @@ static struct bpf_test tests[] = {
                .errstr = "R0 !read_ok",
                .result = REJECT,
        },
+       {
+               "program doesn't init R0 before exit in all branches",
+               .insns = {
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R0 !read_ok",
+               .result = REJECT,
+       },
        {
                "stack out of bounds",
                .insns = {
index 9685af330de5db40f4d7d097ea736b377dd68b36..c5ee1a7c5e8a0a361a8fb21de3ee236faba3804a 100644 (file)
@@ -319,9 +319,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
 {
        const struct evm_ima_xattr_data *xattr_data = xattr_value;
 
-       if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
-           && (xattr_data->type == EVM_XATTR_HMAC))
-               return -EPERM;
+       if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+               if (!xattr_value_len)
+                       return -EINVAL;
+               if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
+                       return -EPERM;
+       }
        return evm_protect_xattr(dentry, xattr_name, xattr_value,
                                 xattr_value_len);
 }
index 922685483bd3625355af6202bb435728ac62c11f..7c8f41e618b6bf41bf5909b89335b465d16512c8 100644 (file)
@@ -378,6 +378,8 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
        result = ima_protect_xattr(dentry, xattr_name, xattr_value,
                                   xattr_value_len);
        if (result == 1) {
+               if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
+                       return -EINVAL;
                ima_reset_appraise_flags(dentry->d_inode,
                         (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
                result = 0;
index c0379d13dbe16f8d4c9705e1c80fd8b2c7b0e4df..9d1c2ebfe12a71d872727fe6f36716ec0a03c5d5 100644 (file)
@@ -61,6 +61,7 @@ enum evm_ima_xattr_type {
        EVM_XATTR_HMAC,
        EVM_IMA_XATTR_DIGSIG,
        IMA_XATTR_DIGEST_NG,
+       IMA_XATTR_LAST
 };
 
 struct evm_ima_xattr_data {
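Aside: the IMA_XATTR_LAST member added to the enum above exists purely so the setxattr hook can range-check the type byte taken from a user-controlled xattr value. The same sentinel trick in isolation, with hypothetical names:

/* Hypothetical example; not IMA code. */
#include <linux/errno.h>

enum demo_xattr_type {
	DEMO_XATTR_A,
	DEMO_XATTR_B,
	DEMO_XATTR_LAST		/* keep last: one past the highest valid value */
};

static int demo_check_xattr_type(unsigned char type)
{
	if (type >= DEMO_XATTR_LAST)
		return -EINVAL;	/* unknown or future value from userspace */
	return 0;
}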
index 102e8fd1d4505416ece1d42333cd8270b76cb395..2d957ba635578758172c9d66addc181a3ce438a9 100644 (file)
@@ -210,6 +210,8 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
        if (err < 0)
                return err;
 
+       if (clear_user(src, sizeof(*src)))
+               return -EFAULT;
        if (put_user(status.state, &src->state) ||
            compat_put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
            compat_put_timespec(&status.tstamp, &src->tstamp) ||
index 45a0eed6d5b17413eaa002e8eb4eafeef2d67088..3b052ed0fbf58d284d55b562ac46ec1ee65cc763 100644 (file)
 #define SAFFIRE_CLOCK_SOURCE_INTERNAL          0
 #define SAFFIRE_CLOCK_SOURCE_SPDIF             1
 
-/* '1' is absent, why... */
+/* clock sources as returned from register of Saffire Pro 10 and 26 */
 #define SAFFIREPRO_CLOCK_SOURCE_INTERNAL       0
+#define SAFFIREPRO_CLOCK_SOURCE_SKIP           1 /* never used on hardware */
 #define SAFFIREPRO_CLOCK_SOURCE_SPDIF          2
-#define SAFFIREPRO_CLOCK_SOURCE_ADAT1          3
-#define SAFFIREPRO_CLOCK_SOURCE_ADAT2          4
+#define SAFFIREPRO_CLOCK_SOURCE_ADAT1          3 /* not used on s.pro. 10 */
+#define SAFFIREPRO_CLOCK_SOURCE_ADAT2          4 /* not used on s.pro. 10 */
 #define SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK      5
+#define SAFFIREPRO_CLOCK_SOURCE_COUNT          6
 
 /* S/PDIF, ADAT1, ADAT2 is enabled or not. three quadlets */
 #define SAFFIREPRO_ENABLE_DIG_IFACES           0x01a4
@@ -101,13 +103,34 @@ saffire_write_quad(struct snd_bebob *bebob, u64 offset, u32 value)
                                  &data, sizeof(__be32), 0);
 }
 
+static char *const saffirepro_10_clk_src_labels[] = {
+       SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "Word Clock"
+};
 static char *const saffirepro_26_clk_src_labels[] = {
        SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "ADAT1", "ADAT2", "Word Clock"
 };
-
-static char *const saffirepro_10_clk_src_labels[] = {
-       SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "Word Clock"
+/* Value maps between registers and labels for SaffirePro 10/26. */
+static const signed char saffirepro_clk_maps[][SAFFIREPRO_CLOCK_SOURCE_COUNT] = {
+       /* SaffirePro 10 */
+       [0] = {
+               [SAFFIREPRO_CLOCK_SOURCE_INTERNAL]  =  0,
+               [SAFFIREPRO_CLOCK_SOURCE_SKIP]      = -1, /* not supported */
+               [SAFFIREPRO_CLOCK_SOURCE_SPDIF]     =  1,
+               [SAFFIREPRO_CLOCK_SOURCE_ADAT1]     = -1, /* not supported */
+               [SAFFIREPRO_CLOCK_SOURCE_ADAT2]     = -1, /* not supported */
+               [SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK] =  2,
+       },
+       /* SaffirePro 26 */
+       [1] = {
+               [SAFFIREPRO_CLOCK_SOURCE_INTERNAL]  =  0,
+               [SAFFIREPRO_CLOCK_SOURCE_SKIP]      = -1, /* not supported */
+               [SAFFIREPRO_CLOCK_SOURCE_SPDIF]     =  1,
+               [SAFFIREPRO_CLOCK_SOURCE_ADAT1]     =  2,
+               [SAFFIREPRO_CLOCK_SOURCE_ADAT2]     =  3,
+               [SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK] =  4,
+       }
 };
+
 static int
 saffirepro_both_clk_freq_get(struct snd_bebob *bebob, unsigned int *rate)
 {
@@ -138,24 +161,35 @@ saffirepro_both_clk_freq_set(struct snd_bebob *bebob, unsigned int rate)
 
        return saffire_write_quad(bebob, SAFFIREPRO_RATE_NOREBOOT, id);
 }
+
+/*
+ * Query the hardware for the current clock source and return the driver's
+ * internal clock index in *id; the mapping depends on the model.
+ */
 static int
 saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
 {
        int err;
-       u32 value;
+       u32 value;       /* clock source read from hw register */
+       const signed char *map;
 
        err = saffire_read_quad(bebob, SAFFIREPRO_OFFSET_CLOCK_SOURCE, &value);
        if (err < 0)
                goto end;
 
-       if (bebob->spec->clock->labels == saffirepro_10_clk_src_labels) {
-               if (value == SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK)
-                       *id = 2;
-               else if (value == SAFFIREPRO_CLOCK_SOURCE_SPDIF)
-                       *id = 1;
-       } else if (value > 1) {
-               *id = value - 1;
+       /* depending on hardware, use a different mapping */
+       if (bebob->spec->clock->labels == saffirepro_10_clk_src_labels)
+               map = saffirepro_clk_maps[0];
+       else
+               map = saffirepro_clk_maps[1];
+
+       /* In case this driver cannot handle the value read from the register. */
+       if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) {
+               err = -EIO;
+               goto end;
        }
+
+       *id = (unsigned int)map[value];
 end:
        return err;
 }
index ef4d0c9f65781a2e2684a143264a8b3aad12d8be..1aab0a32870c84d20a0c5b87f46d625299b34fd4 100644 (file)
@@ -129,12 +129,24 @@ snd_bebob_stream_check_internal_clock(struct snd_bebob *bebob, bool *internal)
        /* 1.The device has its own operation to switch source of clock */
        if (clk_spec) {
                err = clk_spec->get(bebob, &id);
-               if (err < 0)
+               if (err < 0) {
                        dev_err(&bebob->unit->device,
                                "fail to get clock source: %d\n", err);
-               else if (strncmp(clk_spec->labels[id], SND_BEBOB_CLOCK_INTERNAL,
-                                strlen(SND_BEBOB_CLOCK_INTERNAL)) == 0)
+                       goto end;
+               }
+
+               if (id >= clk_spec->num) {
+                       dev_err(&bebob->unit->device,
+                               "clock source %d out of range 0..%d\n",
+                               id, clk_spec->num - 1);
+                       err = -EIO;
+                       goto end;
+               }
+
+               if (strncmp(clk_spec->labels[id], SND_BEBOB_CLOCK_INTERNAL,
+                           strlen(SND_BEBOB_CLOCK_INTERNAL)) == 0)
                        *internal = true;
+
                goto end;
        }
 
index 0e4c0bfc463bbbb3222d92a54f269c3ecff283ee..9940611f2e1b59cd8a15ea9d55c01a6f15c8d417 100644 (file)
@@ -24,7 +24,12 @@ phase88_rack_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
        if (err < 0)
                goto end;
 
-       *id = (enable_ext & 0x01) | ((enable_word & 0x01) << 1);
+       if (enable_ext == 0)
+               *id = 0;
+       else if (enable_word == 0)
+               *id = 1;
+       else
+               *id = 2;
 end:
        return err;
 }
index 7bfdf9c514165c384a36beb89d72f4a9678648b3..1610c38337afe6887e02074f98446a6c18aadc6e 100644 (file)
@@ -681,7 +681,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
        
        /* WARQ is at offset 12 */
        tmp = (reg & AD_DS_WSMC_WARQ) ?
-                       (((reg & AD_DS_WSMC_WARQ >> 12) & 0x01) ? 12 : 18) : 4;
+               ((((reg & AD_DS_WSMC_WARQ) >> 12) & 0x01) ? 12 : 18) : 4;
        tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1;
        
        snd_iprintf(buffer, "Wave FIFO: %d %s words\n\n", tmp,
@@ -693,7 +693,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
        
        /* SYRQ is at offset 4 */
        tmp = (reg & AD_DS_WSMC_SYRQ) ?
-                       (((reg & AD_DS_WSMC_SYRQ >> 4) & 0x01) ? 12 : 18) : 4;
+               ((((reg & AD_DS_WSMC_SYRQ) >> 4) & 0x01) ? 12 : 18) : 4;
        tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1;
        
        snd_iprintf(buffer, "Synthesis FIFO: %d %s words\n\n", tmp,
@@ -709,7 +709,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
        
        /* ACRQ is at offset 4 */
        tmp = (reg & AD_DS_RAMC_ACRQ) ?
-                       (((reg & AD_DS_RAMC_ACRQ >> 4) & 0x01) ? 12 : 18) : 4;
+               ((((reg & AD_DS_RAMC_ACRQ) >> 4) & 0x01) ? 12 : 18) : 4;
        tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1;
        
        snd_iprintf(buffer, "ADC FIFO: %d %s words\n\n", tmp,
@@ -720,7 +720,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
                        
        /* RERQ is at offset 12 */
        tmp = (reg & AD_DS_RAMC_RERQ) ?
-                       (((reg & AD_DS_RAMC_RERQ >> 12) & 0x01) ? 12 : 18) : 4;
+               ((((reg & AD_DS_RAMC_RERQ) >> 12) & 0x01) ? 12 : 18) : 4;
        tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1;
        
        snd_iprintf(buffer, "Resampler FIFO: %d %s words\n\n", tmp,
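Aside: all four ad1889 hunks above fix the same precedence slip -- in C, the shift operator binds tighter than the bitwise AND, so "reg & AD_DS_WSMC_WARQ >> 12" shifts the mask instead of the masked register value. A small standalone demonstration with made-up numbers:

/* Standalone demonstration of the precedence bug; values are made up. */
#include <stdio.h>

#define FIELD_MASK 0x3000u	/* pretend two-bit field at bits 12..13 */

int main(void)
{
	unsigned int reg = 0x1000;	/* field holds the value 1 */

	/* '>>' is applied before '&': the mask gets shifted down to 0x3. */
	printf("buggy:   %u\n", reg & FIELD_MASK >> 12);	/* prints 0 */

	/* Parenthesize to extract the field as intended. */
	printf("correct: %u\n", (reg & FIELD_MASK) >> 12);	/* prints 1 */

	return 0;
}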
index cfcca4c30d4da5f2be82ae40a32b709d9279f97b..9ab1e631cb32244262d9a0cfe5009adfe4677aca 100644 (file)
@@ -374,6 +374,8 @@ static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool
 #ifdef CONFIG_SND_DMA_SGBUF
        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
                struct snd_sg_buf *sgbuf = dmab->private_data;
+               if (chip->driver_type == AZX_DRIVER_CMEDIA)
+                       return; /* deal with only CORB/RIRB buffers */
                if (on)
                        set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
                else
@@ -1769,7 +1771,7 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
 #ifdef CONFIG_X86
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct azx *chip = apcm->chip;
-       if (!azx_snoop(chip))
+       if (!azx_snoop(chip) && chip->driver_type != AZX_DRIVER_CMEDIA)
                area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
 #endif
 }
index 34b7bdb510c794d340323b45a7b9170a5dfa176e..c9cf248ce8ecbec0ec6f955da1c8d263aeed170f 100644 (file)
@@ -2675,7 +2675,7 @@ static void alc269_shutup(struct hda_codec *codec)
 
 static struct coef_fw alc282_coefs[] = {
        WRITE_COEF(0x03, 0x0002), /* Power Down Control */
-       WRITE_COEF(0x05, 0x0700), /* FIFO and filter clock */
+       UPDATE_COEF(0x05, 0xff3f, 0x0700), /* FIFO and filter clock */
        WRITE_COEF(0x07, 0x0200), /* DMIC control */
        UPDATE_COEF(0x06, 0x00f0, 0), /* Analog clock */
        UPDATE_COEF(0x08, 0xfffc, 0x0c2c), /* JD */
@@ -2786,7 +2786,7 @@ static void alc282_shutup(struct hda_codec *codec)
 
 static struct coef_fw alc283_coefs[] = {
        WRITE_COEF(0x03, 0x0002), /* Power Down Control */
-       WRITE_COEF(0x05, 0x0700), /* FIFO and filter clock */
+       UPDATE_COEF(0x05, 0xff3f, 0x0700), /* FIFO and filter clock */
        WRITE_COEF(0x07, 0x0200), /* DMIC control */
        UPDATE_COEF(0x06, 0x00f0, 0), /* Analog clock */
        UPDATE_COEF(0x08, 0xfffc, 0x0c2c), /* JD */
@@ -2817,6 +2817,7 @@ static struct coef_fw alc283_coefs[] = {
        UPDATE_COEF(0x40, 0xf800, 0x9800), /* Class D DC enable */
        UPDATE_COEF(0x42, 0xf000, 0x2000), /* DC offset */
        WRITE_COEF(0x37, 0xfc06), /* Class D amp control */
+       UPDATE_COEF(0x1b, 0x8000, 0), /* HP JD control */
        {}
 };
 
@@ -5922,6 +5923,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
        SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
        SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
index 0e9623368ab0ad19fcba9fc5970949dce213e35d..7d5d6444a83737ffa3c01acbe1925de619e0390a 100644 (file)
@@ -49,7 +49,6 @@ source "sound/soc/mxs/Kconfig"
 source "sound/soc/pxa/Kconfig"
 source "sound/soc/rockchip/Kconfig"
 source "sound/soc/samsung/Kconfig"
-source "sound/soc/s6000/Kconfig"
 source "sound/soc/sh/Kconfig"
 source "sound/soc/sirf/Kconfig"
 source "sound/soc/spear/Kconfig"
index 534714a1ca449dcfbefc176bba1e65cdce73a782..d88edfced8c498c4bed67dfc5b14941c4ba904bf 100644 (file)
@@ -26,7 +26,6 @@ obj-$(CONFIG_SND_SOC) += kirkwood/
 obj-$(CONFIG_SND_SOC)  += pxa/
 obj-$(CONFIG_SND_SOC)  += rockchip/
 obj-$(CONFIG_SND_SOC)  += samsung/
-obj-$(CONFIG_SND_SOC)  += s6000/
 obj-$(CONFIG_SND_SOC)  += sh/
 obj-$(CONFIG_SND_SOC)  += sirf/
 obj-$(CONFIG_SND_SOC)  += spear/
index 5518ebd6947c5f28329fb474b4b313920f5e94ec..91f60282fd2fbff8a64315ec8400b3e156a374b9 100644 (file)
@@ -405,6 +405,7 @@ static const struct snd_soc_dapm_widget adau1761_dapm_widgets[] = {
                2, 0, NULL, 0),
 
        SND_SOC_DAPM_SUPPLY("Slew Clock", ADAU1761_CLK_ENABLE0, 6, 0, NULL, 0),
+       SND_SOC_DAPM_SUPPLY("ALC Clock", ADAU1761_CLK_ENABLE0, 5, 0, NULL, 0),
 
        SND_SOC_DAPM_SUPPLY_S("Digital Clock 0", 1, ADAU1761_CLK_ENABLE1,
                0, 0, NULL, 0),
@@ -436,6 +437,9 @@ static const struct snd_soc_dapm_route adau1761_dapm_routes[] = {
        { "Right Playback Mixer", NULL, "Slew Clock" },
        { "Left Playback Mixer", NULL, "Slew Clock" },
 
+       { "Left Input Mixer", NULL, "ALC Clock" },
+       { "Right Input Mixer", NULL, "ALC Clock" },
+
        { "Digital Clock 0", NULL, "SYSCLK" },
        { "Digital Clock 1", NULL, "SYSCLK" },
 };
index 3b145313f93eecc0602cc72115d71769f07fdf26..ed866e9a2928c4ca5415571fc757f6ba588b636f 100644 (file)
@@ -792,7 +792,7 @@ static int fsl_asrc_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        asrc_priv->pdev = pdev;
-       strcpy(asrc_priv->name, np->name);
+       strncpy(asrc_priv->name, np->name, sizeof(asrc_priv->name) - 1);
 
        /* Get the addresses and IRQ */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 8bcdfda09d7ad9c5fd724cc685e63f3ac7829f77..a645e296199e14086c6def7a98d34be5de54bcf2 100644 (file)
@@ -734,7 +734,7 @@ static int fsl_esai_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        esai_priv->pdev = pdev;
-       strcpy(esai_priv->name, np->name);
+       strncpy(esai_priv->name, np->name, sizeof(esai_priv->name) - 1);
 
        /* Get the addresses and IRQ */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 33fc5c3abf558e52850e46c523702559e203b1bc..4df867cbb92a1992154b12bf1f69293accd51367 100644 (file)
@@ -691,9 +691,7 @@ static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
 }
 
 #define HSW_FORMATS \
-       (SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S24_LE | \
-       SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S16_LE |\
-       SNDRV_PCM_FMTBIT_S8)
+       (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
 
 static struct snd_soc_dai_driver hsw_dais[] = {
        {
diff --git a/sound/soc/s6000/Kconfig b/sound/soc/s6000/Kconfig
deleted file mode 100644 (file)
index f244a25..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-config SND_S6000_SOC
-       tristate "SoC Audio for the Stretch s6000 family"
-       depends on XTENSA_VARIANT_S6000 || COMPILE_TEST
-       depends on HAS_IOMEM
-       select SND_S6000_SOC_PCM if XTENSA_VARIANT_S6000
-       help
-         Say Y or M if you want to add support for codecs attached to
-         s6000 family chips. You will also need to select the platform
-         to support below.
-
-config SND_S6000_SOC_PCM
-       tristate
-
-config SND_S6000_SOC_I2S
-       tristate
-
-config SND_S6000_SOC_S6IPCAM
-       bool "SoC Audio support for Stretch 6105 IP Camera"
-       depends on SND_S6000_SOC=y
-       depends on I2C=y
-       depends on XTENSA_PLATFORM_S6105 || COMPILE_TEST
-       select SND_S6000_SOC_I2S
-       select SND_SOC_TLV320AIC3X
-       help
-         Say Y if you want to add support for SoC audio on the
-         Stretch s6105 IP Camera Reference Design.
diff --git a/sound/soc/s6000/Makefile b/sound/soc/s6000/Makefile
deleted file mode 100644 (file)
index 0f0ae2a..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-# s6000 Platform Support
-snd-soc-s6000-objs := s6000-pcm.o
-snd-soc-s6000-i2s-objs := s6000-i2s.o
-
-obj-$(CONFIG_SND_S6000_SOC_PCM) += snd-soc-s6000.o
-obj-$(CONFIG_SND_S6000_SOC_I2S) += snd-soc-s6000-i2s.o
-
-# s6105 Machine Support
-snd-soc-s6ipcam-objs := s6105-ipcam.o
-
-obj-$(CONFIG_SND_S6000_SOC_S6IPCAM) += snd-soc-s6ipcam.o
diff --git a/sound/soc/s6000/s6000-i2s.c b/sound/soc/s6000/s6000-i2s.c
deleted file mode 100644 (file)
index 1c8d011..0000000
+++ /dev/null
@@ -1,617 +0,0 @@
-/*
- * ALSA SoC I2S Audio Layer for the Stretch S6000 family
- *
- * Author:      Daniel Gloeckner, <dg@emlix.com>
- * Copyright:   (C) 2009 emlix GmbH <info@emlix.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
-#include <sound/soc.h>
-
-#include "s6000-i2s.h"
-#include "s6000-pcm.h"
-
-struct s6000_i2s_dev {
-       dma_addr_t sifbase;
-       u8 __iomem *scbbase;
-       unsigned int wide;
-       unsigned int channel_in;
-       unsigned int channel_out;
-       unsigned int lines_in;
-       unsigned int lines_out;
-       struct s6000_pcm_dma_params dma_params;
-};
-
-#define S6_I2S_INTERRUPT_STATUS        0x00
-#define   S6_I2S_INT_OVERRUN   1
-#define   S6_I2S_INT_UNDERRUN  2
-#define   S6_I2S_INT_ALIGNMENT 4
-#define S6_I2S_INTERRUPT_ENABLE        0x04
-#define S6_I2S_INTERRUPT_RAW   0x08
-#define S6_I2S_INTERRUPT_CLEAR 0x0C
-#define S6_I2S_INTERRUPT_SET   0x10
-#define S6_I2S_MODE            0x20
-#define   S6_I2S_DUAL          0
-#define   S6_I2S_WIDE          1
-#define S6_I2S_TX_DEFAULT      0x24
-#define S6_I2S_DATA_CFG(c)     (0x40 + 0x10 * (c))
-#define   S6_I2S_IN            0
-#define   S6_I2S_OUT           1
-#define   S6_I2S_UNUSED                2
-#define S6_I2S_INTERFACE_CFG(c)        (0x44 + 0x10 * (c))
-#define   S6_I2S_DIV_MASK      0x001fff
-#define   S6_I2S_16BIT         0x000000
-#define   S6_I2S_20BIT         0x002000
-#define   S6_I2S_24BIT         0x004000
-#define   S6_I2S_32BIT         0x006000
-#define   S6_I2S_BITS_MASK     0x006000
-#define   S6_I2S_MEM_16BIT     0x000000
-#define   S6_I2S_MEM_32BIT     0x008000
-#define   S6_I2S_MEM_MASK      0x008000
-#define   S6_I2S_CHANNELS_SHIFT        16
-#define   S6_I2S_CHANNELS_MASK 0x030000
-#define   S6_I2S_SCK_IN                0x000000
-#define   S6_I2S_SCK_OUT       0x040000
-#define   S6_I2S_SCK_DIR       0x040000
-#define   S6_I2S_WS_IN         0x000000
-#define   S6_I2S_WS_OUT                0x080000
-#define   S6_I2S_WS_DIR                0x080000
-#define   S6_I2S_LEFT_FIRST    0x000000
-#define   S6_I2S_RIGHT_FIRST   0x100000
-#define   S6_I2S_FIRST         0x100000
-#define   S6_I2S_CUR_SCK       0x200000
-#define   S6_I2S_CUR_WS                0x400000
-#define S6_I2S_ENABLE(c)       (0x48 + 0x10 * (c))
-#define   S6_I2S_DISABLE_IF    0x02
-#define   S6_I2S_ENABLE_IF     0x03
-#define   S6_I2S_IS_BUSY       0x04
-#define   S6_I2S_DMA_ACTIVE    0x08
-#define   S6_I2S_IS_ENABLED    0x10
-
-#define S6_I2S_NUM_LINES       4
-
-#define S6_I2S_SIF_PORT0       0x0000000
-#define S6_I2S_SIF_PORT1       0x0000080 /* docs say 0x0000010 */
-
-static inline void s6_i2s_write_reg(struct s6000_i2s_dev *dev, int reg, u32 val)
-{
-       writel(val, dev->scbbase + reg);
-}
-
-static inline u32 s6_i2s_read_reg(struct s6000_i2s_dev *dev, int reg)
-{
-       return readl(dev->scbbase + reg);
-}
-
-static inline void s6_i2s_mod_reg(struct s6000_i2s_dev *dev, int reg,
-                                 u32 mask, u32 val)
-{
-       val ^= s6_i2s_read_reg(dev, reg) & ~mask;
-       s6_i2s_write_reg(dev, reg, val);
-}
-
-static void s6000_i2s_start_channel(struct s6000_i2s_dev *dev, int channel)
-{
-       int i, j, cur, prev;
-
-       /*
-        * Wait for WCLK to toggle 5 times before enabling the channel
-        * s6000 Family Datasheet 3.6.4:
-        *   "At least two cycles of WS must occur between commands
-        *    to disable or enable the interface"
-        */
-       j = 0;
-       prev = ~S6_I2S_CUR_WS;
-       for (i = 1000000; --i && j < 6; ) {
-               cur = s6_i2s_read_reg(dev, S6_I2S_INTERFACE_CFG(channel))
-                      & S6_I2S_CUR_WS;
-               if (prev != cur) {
-                       prev = cur;
-                       j++;
-               }
-       }
-       if (j < 6)
-               printk(KERN_WARNING "s6000-i2s: timeout waiting for WCLK\n");
-
-       s6_i2s_write_reg(dev, S6_I2S_ENABLE(channel), S6_I2S_ENABLE_IF);
-}
-
-static void s6000_i2s_stop_channel(struct s6000_i2s_dev *dev, int channel)
-{
-       s6_i2s_write_reg(dev, S6_I2S_ENABLE(channel), S6_I2S_DISABLE_IF);
-}
-
-static void s6000_i2s_start(struct snd_pcm_substream *substream)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
-       int channel;
-
-       channel = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
-                       dev->channel_out : dev->channel_in;
-
-       s6000_i2s_start_channel(dev, channel);
-}
-
-static void s6000_i2s_stop(struct snd_pcm_substream *substream)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
-       int channel;
-
-       channel = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
-                       dev->channel_out : dev->channel_in;
-
-       s6000_i2s_stop_channel(dev, channel);
-}
-
-static int s6000_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
-                            int after)
-{
-       switch (cmd) {
-       case SNDRV_PCM_TRIGGER_START:
-       case SNDRV_PCM_TRIGGER_RESUME:
-       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               if ((substream->stream == SNDRV_PCM_STREAM_CAPTURE) ^ !after)
-                       s6000_i2s_start(substream);
-               break;
-       case SNDRV_PCM_TRIGGER_STOP:
-       case SNDRV_PCM_TRIGGER_SUSPEND:
-       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               if (!after)
-                       s6000_i2s_stop(substream);
-       }
-       return 0;
-}
-
-static unsigned int s6000_i2s_int_sources(struct s6000_i2s_dev *dev)
-{
-       unsigned int pending;
-       pending = s6_i2s_read_reg(dev, S6_I2S_INTERRUPT_RAW);
-       pending &= S6_I2S_INT_ALIGNMENT |
-                  S6_I2S_INT_UNDERRUN |
-                  S6_I2S_INT_OVERRUN;
-       s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_CLEAR, pending);
-
-       return pending;
-}
-
-static unsigned int s6000_i2s_check_xrun(struct snd_soc_dai *cpu_dai)
-{
-       struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
-       unsigned int errors;
-       unsigned int ret;
-
-       errors = s6000_i2s_int_sources(dev);
-       if (likely(!errors))
-               return 0;
-
-       ret = 0;
-       if (errors & S6_I2S_INT_ALIGNMENT)
-               printk(KERN_ERR "s6000-i2s: WCLK misaligned\n");
-       if (errors & S6_I2S_INT_UNDERRUN)
-               ret |= 1 << SNDRV_PCM_STREAM_PLAYBACK;
-       if (errors & S6_I2S_INT_OVERRUN)
-               ret |= 1 << SNDRV_PCM_STREAM_CAPTURE;
-       return ret;
-}
-
-static void s6000_i2s_wait_disabled(struct s6000_i2s_dev *dev)
-{
-       int channel;
-       int n = 50;
-       for (channel = 0; channel < 2; channel++) {
-               while (--n >= 0) {
-                       int v = s6_i2s_read_reg(dev, S6_I2S_ENABLE(channel));
-                       if ((v & S6_I2S_IS_ENABLED)
-                           || !(v & (S6_I2S_DMA_ACTIVE | S6_I2S_IS_BUSY)))
-                               break;
-                       udelay(20);
-               }
-       }
-       if (n < 0)
-               printk(KERN_WARNING "s6000-i2s: timeout disabling interfaces");
-}
-
-static int s6000_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
-                                  unsigned int fmt)
-{
-       struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
-       u32 w;
-
-       switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
-       case SND_SOC_DAIFMT_CBM_CFM:
-               w = S6_I2S_SCK_IN | S6_I2S_WS_IN;
-               break;
-       case SND_SOC_DAIFMT_CBS_CFM:
-               w = S6_I2S_SCK_OUT | S6_I2S_WS_IN;
-               break;
-       case SND_SOC_DAIFMT_CBM_CFS:
-               w = S6_I2S_SCK_IN | S6_I2S_WS_OUT;
-               break;
-       case SND_SOC_DAIFMT_CBS_CFS:
-               w = S6_I2S_SCK_OUT | S6_I2S_WS_OUT;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
-       case SND_SOC_DAIFMT_NB_NF:
-               w |= S6_I2S_LEFT_FIRST;
-               break;
-       case SND_SOC_DAIFMT_NB_IF:
-               w |= S6_I2S_RIGHT_FIRST;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(0),
-                      S6_I2S_FIRST | S6_I2S_WS_DIR | S6_I2S_SCK_DIR, w);
-       s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(1),
-                      S6_I2S_FIRST | S6_I2S_WS_DIR | S6_I2S_SCK_DIR, w);
-
-       return 0;
-}
-
-static int s6000_i2s_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
-{
-       struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
-
-       if (!div || (div & 1) || div > (S6_I2S_DIV_MASK + 1) * 2)
-               return -EINVAL;
-
-       s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(div_id),
-                      S6_I2S_DIV_MASK, div / 2 - 1);
-       return 0;
-}
-
-static int s6000_i2s_hw_params(struct snd_pcm_substream *substream,
-                              struct snd_pcm_hw_params *params,
-                              struct snd_soc_dai *dai)
-{
-       struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
-       int interf;
-       u32 w = 0;
-
-       if (dev->wide)
-               interf = 0;
-       else {
-               w |= (((params_channels(params) - 2) / 2)
-                     << S6_I2S_CHANNELS_SHIFT) & S6_I2S_CHANNELS_MASK;
-               interf = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-                               ? dev->channel_out : dev->channel_in;
-       }
-
-       switch (params_format(params)) {
-       case SNDRV_PCM_FORMAT_S16_LE:
-               w |= S6_I2S_16BIT | S6_I2S_MEM_16BIT;
-               break;
-       case SNDRV_PCM_FORMAT_S32_LE:
-               w |= S6_I2S_32BIT | S6_I2S_MEM_32BIT;
-               break;
-       default:
-               printk(KERN_WARNING "s6000-i2s: unsupported PCM format %x\n",
-                      params_format(params));
-               return -EINVAL;
-       }
-
-       if (s6_i2s_read_reg(dev, S6_I2S_INTERFACE_CFG(interf))
-            & S6_I2S_IS_ENABLED) {
-               printk(KERN_ERR "s6000-i2s: interface already enabled\n");
-               return -EBUSY;
-       }
-
-       s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(interf),
-                      S6_I2S_CHANNELS_MASK|S6_I2S_MEM_MASK|S6_I2S_BITS_MASK,
-                      w);
-
-       return 0;
-}
-
-static int s6000_i2s_dai_probe(struct snd_soc_dai *dai)
-{
-       struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
-       struct s6000_snd_platform_data *pdata = dai->dev->platform_data;
-
-       if (!pdata)
-               return -EINVAL;
-
-       dai->capture_dma_data = &dev->dma_params;
-       dai->playback_dma_data = &dev->dma_params;
-
-       dev->wide = pdata->wide;
-       dev->channel_in = pdata->channel_in;
-       dev->channel_out = pdata->channel_out;
-       dev->lines_in = pdata->lines_in;
-       dev->lines_out = pdata->lines_out;
-
-       s6_i2s_write_reg(dev, S6_I2S_MODE,
-                        dev->wide ? S6_I2S_WIDE : S6_I2S_DUAL);
-
-       if (dev->wide) {
-               int i;
-
-               if (dev->lines_in + dev->lines_out > S6_I2S_NUM_LINES)
-                       return -EINVAL;
-
-               dev->channel_in = 0;
-               dev->channel_out = 1;
-               dai->driver->capture.channels_min = 2 * dev->lines_in;
-               dai->driver->capture.channels_max = dai->driver->capture.channels_min;
-               dai->driver->playback.channels_min = 2 * dev->lines_out;
-               dai->driver->playback.channels_max = dai->driver->playback.channels_min;
-
-               for (i = 0; i < dev->lines_out; i++)
-                       s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(i), S6_I2S_OUT);
-
-               for (; i < S6_I2S_NUM_LINES - dev->lines_in; i++)
-                       s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(i),
-                                        S6_I2S_UNUSED);
-
-               for (; i < S6_I2S_NUM_LINES; i++)
-                       s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(i), S6_I2S_IN);
-       } else {
-               unsigned int cfg[2] = {S6_I2S_UNUSED, S6_I2S_UNUSED};
-
-               if (dev->lines_in > 1 || dev->lines_out > 1)
-                       return -EINVAL;
-
-               dai->driver->capture.channels_min = 2 * dev->lines_in;
-               dai->driver->capture.channels_max = 8 * dev->lines_in;
-               dai->driver->playback.channels_min = 2 * dev->lines_out;
-               dai->driver->playback.channels_max = 8 * dev->lines_out;
-
-               if (dev->lines_in)
-                       cfg[dev->channel_in] = S6_I2S_IN;
-               if (dev->lines_out)
-                       cfg[dev->channel_out] = S6_I2S_OUT;
-
-               s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(0), cfg[0]);
-               s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(1), cfg[1]);
-       }
-
-       if (dev->lines_out) {
-               if (dev->lines_in) {
-                       if (!dev->dma_params.dma_out)
-                               return -ENODEV;
-               } else {
-                       dev->dma_params.dma_out = dev->dma_params.dma_in;
-                       dev->dma_params.dma_in = 0;
-               }
-       }
-       dev->dma_params.sif_in = dev->sifbase + (dev->channel_in ?
-                                       S6_I2S_SIF_PORT1 : S6_I2S_SIF_PORT0);
-       dev->dma_params.sif_out = dev->sifbase + (dev->channel_out ?
-                                       S6_I2S_SIF_PORT1 : S6_I2S_SIF_PORT0);
-       dev->dma_params.same_rate = pdata->same_rate | pdata->wide;
-       return 0;
-}
-
-#define S6000_I2S_RATES SNDRV_PCM_RATE_CONTINUOUS
-#define S6000_I2S_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
-
-static const struct snd_soc_dai_ops s6000_i2s_dai_ops = {
-       .set_fmt = s6000_i2s_set_dai_fmt,
-       .set_clkdiv = s6000_i2s_set_clkdiv,
-       .hw_params = s6000_i2s_hw_params,
-};
-
-static struct snd_soc_dai_driver s6000_i2s_dai = {
-       .probe = s6000_i2s_dai_probe,
-       .playback = {
-               .channels_min = 2,
-               .channels_max = 8,
-               .formats = S6000_I2S_FORMATS,
-               .rates = S6000_I2S_RATES,
-               .rate_min = 0,
-               .rate_max = 1562500,
-       },
-       .capture = {
-               .channels_min = 2,
-               .channels_max = 8,
-               .formats = S6000_I2S_FORMATS,
-               .rates = S6000_I2S_RATES,
-               .rate_min = 0,
-               .rate_max = 1562500,
-       },
-       .ops = &s6000_i2s_dai_ops,
-};
-
-static const struct snd_soc_component_driver s6000_i2s_component = {
-       .name           = "s6000-i2s",
-};
-
-static int s6000_i2s_probe(struct platform_device *pdev)
-{
-       struct s6000_i2s_dev *dev;
-       struct resource *scbmem, *sifmem, *region, *dma1, *dma2;
-       u8 __iomem *mmio;
-       int ret;
-
-       scbmem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!scbmem) {
-               dev_err(&pdev->dev, "no mem resource?\n");
-               ret = -ENODEV;
-               goto err_release_none;
-       }
-
-       region = request_mem_region(scbmem->start, resource_size(scbmem),
-                                                               pdev->name);
-       if (!region) {
-               dev_err(&pdev->dev, "I2S SCB region already claimed\n");
-               ret = -EBUSY;
-               goto err_release_none;
-       }
-
-       mmio = ioremap(scbmem->start, resource_size(scbmem));
-       if (!mmio) {
-               dev_err(&pdev->dev, "can't ioremap SCB region\n");
-               ret = -ENOMEM;
-               goto err_release_scb;
-       }
-
-       sifmem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!sifmem) {
-               dev_err(&pdev->dev, "no second mem resource?\n");
-               ret = -ENODEV;
-               goto err_release_map;
-       }
-
-       region = request_mem_region(sifmem->start, resource_size(sifmem),
-                                                               pdev->name);
-       if (!region) {
-               dev_err(&pdev->dev, "I2S SIF region already claimed\n");
-               ret = -EBUSY;
-               goto err_release_map;
-       }
-
-       dma1 = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-       if (!dma1) {
-               dev_err(&pdev->dev, "no dma resource?\n");
-               ret = -ENODEV;
-               goto err_release_sif;
-       }
-
-       region = request_mem_region(dma1->start, resource_size(dma1),
-                                                               pdev->name);
-       if (!region) {
-               dev_err(&pdev->dev, "I2S DMA region already claimed\n");
-               ret = -EBUSY;
-               goto err_release_sif;
-       }
-
-       dma2 = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-       if (dma2) {
-               region = request_mem_region(dma2->start, resource_size(dma2),
-                                                               pdev->name);
-               if (!region) {
-                       dev_err(&pdev->dev,
-                               "I2S DMA region already claimed\n");
-                       ret = -EBUSY;
-                       goto err_release_dma1;
-               }
-       }
-
-       dev = kzalloc(sizeof(struct s6000_i2s_dev), GFP_KERNEL);
-       if (!dev) {
-               ret = -ENOMEM;
-               goto err_release_dma2;
-       }
-       dev_set_drvdata(&pdev->dev, dev);
-
-       dev->sifbase = sifmem->start;
-       dev->scbbase = mmio;
-
-       s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_ENABLE, 0);
-       s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_CLEAR,
-                        S6_I2S_INT_ALIGNMENT |
-                        S6_I2S_INT_UNDERRUN |
-                        S6_I2S_INT_OVERRUN);
-
-       s6000_i2s_stop_channel(dev, 0);
-       s6000_i2s_stop_channel(dev, 1);
-       s6000_i2s_wait_disabled(dev);
-
-       dev->dma_params.check_xrun = s6000_i2s_check_xrun;
-       dev->dma_params.trigger = s6000_i2s_trigger;
-       dev->dma_params.dma_in = dma1->start;
-       dev->dma_params.dma_out = dma2 ? dma2->start : 0;
-       dev->dma_params.irq = platform_get_irq(pdev, 0);
-       if (dev->dma_params.irq < 0) {
-               dev_err(&pdev->dev, "no irq resource?\n");
-               ret = -ENODEV;
-               goto err_release_dev;
-       }
-
-       s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_ENABLE,
-                        S6_I2S_INT_ALIGNMENT |
-                        S6_I2S_INT_UNDERRUN |
-                        S6_I2S_INT_OVERRUN);
-
-       ret = snd_soc_register_component(&pdev->dev, &s6000_i2s_component,
-                                        &s6000_i2s_dai, 1);
-       if (ret)
-               goto err_release_dev;
-
-       return 0;
-
-err_release_dev:
-       kfree(dev);
-err_release_dma2:
-       if (dma2)
-               release_mem_region(dma2->start, resource_size(dma2));
-err_release_dma1:
-       release_mem_region(dma1->start, resource_size(dma1));
-err_release_sif:
-       release_mem_region(sifmem->start, resource_size(sifmem));
-err_release_map:
-       iounmap(mmio);
-err_release_scb:
-       release_mem_region(scbmem->start, resource_size(scbmem));
-err_release_none:
-       return ret;
-}
-
-static int s6000_i2s_remove(struct platform_device *pdev)
-{
-       struct s6000_i2s_dev *dev = dev_get_drvdata(&pdev->dev);
-       struct resource *region;
-       void __iomem *mmio = dev->scbbase;
-
-       snd_soc_unregister_component(&pdev->dev);
-
-       s6000_i2s_stop_channel(dev, 0);
-       s6000_i2s_stop_channel(dev, 1);
-
-       s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_ENABLE, 0);
-       kfree(dev);
-
-       region = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-       release_mem_region(region->start, resource_size(region));
-
-       region = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-       if (region)
-               release_mem_region(region->start, resource_size(region));
-
-       region = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(region->start, resource_size(region));
-
-       iounmap(mmio);
-       region = platform_get_resource(pdev, IORESOURCE_IO, 0);
-       release_mem_region(region->start, resource_size(region));
-
-       return 0;
-}
-
-static struct platform_driver s6000_i2s_driver = {
-       .probe  = s6000_i2s_probe,
-       .remove = s6000_i2s_remove,
-       .driver = {
-               .name   = "s6000-i2s",
-               .owner  = THIS_MODULE,
-       },
-};
-
-module_platform_driver(s6000_i2s_driver);
-
-MODULE_AUTHOR("Daniel Gloeckner");
-MODULE_DESCRIPTION("Stretch s6000 family I2S SoC Interface");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s6000/s6000-i2s.h b/sound/soc/s6000/s6000-i2s.h
deleted file mode 100644 (file)
index 86aa192..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * ALSA SoC I2S Audio Layer for the Stretch s6000 family
- *
- * Author:      Daniel Gloeckner, <dg@emlix.com>
- * Copyright:   (C) 2009 emlix GmbH <info@emlix.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _S6000_I2S_H
-#define _S6000_I2S_H
-
-struct s6000_snd_platform_data {
-       int lines_in;
-       int lines_out;
-       int channel_in;
-       int channel_out;
-       int wide;
-       int same_rate;
-};
-#endif
diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
deleted file mode 100644 (file)
index fb8461e..0000000
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * ALSA PCM interface for the Stretch s6000 family
- *
- * Author:      Daniel Gloeckner, <dg@emlix.com>
- * Copyright:   (C) 2009 emlix GmbH <info@emlix.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include <asm/dma.h>
-#include <variant/dmac.h>
-
-#include "s6000-pcm.h"
-
-#define S6_PCM_PREALLOCATE_SIZE (96 * 1024)
-#define S6_PCM_PREALLOCATE_MAX  (2048 * 1024)
-
-static struct snd_pcm_hardware s6000_pcm_hardware = {
-       .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
-                SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
-                SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_JOINT_DUPLEX),
-       .buffer_bytes_max = 0x7ffffff0,
-       .period_bytes_min = 16,
-       .period_bytes_max = 0xfffff0,
-       .periods_min = 2,
-       .periods_max = 1024, /* no limit */
-       .fifo_size = 0,
-};
-
-struct s6000_runtime_data {
-       spinlock_t lock;
-       int period;             /* current DMA period */
-};
-
-static void s6000_pcm_enqueue_dma(struct snd_pcm_substream *substream)
-{
-       struct snd_pcm_runtime *runtime = substream->runtime;
-       struct s6000_runtime_data *prtd = runtime->private_data;
-       struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
-       struct s6000_pcm_dma_params *par;
-       int channel;
-       unsigned int period_size;
-       unsigned int dma_offset;
-       dma_addr_t dma_pos;
-       dma_addr_t src, dst;
-
-       par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
-
-       period_size = snd_pcm_lib_period_bytes(substream);
-       dma_offset = prtd->period * period_size;
-       dma_pos = runtime->dma_addr + dma_offset;
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               src = dma_pos;
-               dst = par->sif_out;
-               channel = par->dma_out;
-       } else {
-               src = par->sif_in;
-               dst = dma_pos;
-               channel = par->dma_in;
-       }
-
-       if (!s6dmac_channel_enabled(DMA_MASK_DMAC(channel),
-                                   DMA_INDEX_CHNL(channel)))
-               return;
-
-       if (s6dmac_fifo_full(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel))) {
-               printk(KERN_ERR "s6000-pcm: fifo full\n");
-               return;
-       }
-
-       if (WARN_ON(period_size & 15))
-               return;
-       s6dmac_put_fifo(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel),
-                       src, dst, period_size);
-
-       prtd->period++;
-       if (unlikely(prtd->period >= runtime->periods))
-               prtd->period = 0;
-}
-
-static irqreturn_t s6000_pcm_irq(int irq, void *data)
-{
-       struct snd_pcm *pcm = data;
-       struct snd_soc_pcm_runtime *runtime = pcm->private_data;
-       struct s6000_runtime_data *prtd;
-       unsigned int has_xrun;
-       int i, ret = IRQ_NONE;
-
-       for (i = 0; i < 2; ++i) {
-               struct snd_pcm_substream *substream = pcm->streams[i].substream;
-               struct s6000_pcm_dma_params *params =
-                                       snd_soc_dai_get_dma_data(runtime->cpu_dai, substream);
-               u32 channel;
-               unsigned int pending;
-
-               if (substream == SNDRV_PCM_STREAM_PLAYBACK)
-                       channel = params->dma_out;
-               else
-                       channel = params->dma_in;
-
-               has_xrun = params->check_xrun(runtime->cpu_dai);
-
-               if (!channel)
-                       continue;
-
-               if (unlikely(has_xrun & (1 << i)) &&
-                   substream->runtime &&
-                   snd_pcm_running(substream)) {
-                       dev_dbg(pcm->dev, "xrun\n");
-                       snd_pcm_stream_lock(substream);
-                       snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
-                       snd_pcm_stream_unlock(substream);
-                       ret = IRQ_HANDLED;
-               }
-
-               pending = s6dmac_int_sources(DMA_MASK_DMAC(channel),
-                                            DMA_INDEX_CHNL(channel));
-
-               if (pending & 1) {
-                       ret = IRQ_HANDLED;
-                       if (likely(substream->runtime &&
-                                  snd_pcm_running(substream))) {
-                               snd_pcm_period_elapsed(substream);
-                               dev_dbg(pcm->dev, "period elapsed %x %x\n",
-                                      s6dmac_cur_src(DMA_MASK_DMAC(channel),
-                                                  DMA_INDEX_CHNL(channel)),
-                                      s6dmac_cur_dst(DMA_MASK_DMAC(channel),
-                                                  DMA_INDEX_CHNL(channel)));
-                               prtd = substream->runtime->private_data;
-                               spin_lock(&prtd->lock);
-                               s6000_pcm_enqueue_dma(substream);
-                               spin_unlock(&prtd->lock);
-                       }
-               }
-
-               if (unlikely(pending & ~7)) {
-                       if (pending & (1 << 3))
-                               printk(KERN_WARNING
-                                      "s6000-pcm: DMA %x Underflow\n",
-                                      channel);
-                       if (pending & (1 << 4))
-                               printk(KERN_WARNING
-                                      "s6000-pcm: DMA %x Overflow\n",
-                                      channel);
-                       if (pending & 0x1e0)
-                               printk(KERN_WARNING
-                                      "s6000-pcm: DMA %x Master Error "
-                                      "(mask %x)\n",
-                                      channel, pending >> 5);
-
-               }
-       }
-
-       return ret;
-}
-
-static int s6000_pcm_start(struct snd_pcm_substream *substream)
-{
-       struct s6000_runtime_data *prtd = substream->runtime->private_data;
-       struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
-       struct s6000_pcm_dma_params *par;
-       unsigned long flags;
-       int srcinc;
-       u32 dma;
-
-       par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
-
-       spin_lock_irqsave(&prtd->lock, flags);
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               srcinc = 1;
-               dma = par->dma_out;
-       } else {
-               srcinc = 0;
-               dma = par->dma_in;
-       }
-       s6dmac_enable_chan(DMA_MASK_DMAC(dma), DMA_INDEX_CHNL(dma),
-                          1 /* priority 1 (0 is max) */,
-                          0 /* peripheral requests w/o xfer length mode */,
-                          srcinc /* source address increment */,
-                          srcinc^1 /* destination address increment */,
-                          0 /* chunksize 0 (skip impossible on this dma) */,
-                          0 /* source skip after chunk (impossible) */,
-                          0 /* destination skip after chunk (impossible) */,
-                          4 /* 16 byte burst size */,
-                          -1 /* don't conserve bandwidth */,
-                          0 /* low watermark irq descriptor threshold */,
-                          0 /* disable hardware timestamps */,
-                          1 /* enable channel */);
-
-       s6000_pcm_enqueue_dma(substream);
-       s6000_pcm_enqueue_dma(substream);
-
-       spin_unlock_irqrestore(&prtd->lock, flags);
-
-       return 0;
-}
-
-static int s6000_pcm_stop(struct snd_pcm_substream *substream)
-{
-       struct s6000_runtime_data *prtd = substream->runtime->private_data;
-       struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
-       struct s6000_pcm_dma_params *par;
-       unsigned long flags;
-       u32 channel;
-
-       par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               channel = par->dma_out;
-       else
-               channel = par->dma_in;
-
-       s6dmac_set_terminal_count(DMA_MASK_DMAC(channel),
-                                 DMA_INDEX_CHNL(channel), 0);
-
-       spin_lock_irqsave(&prtd->lock, flags);
-
-       s6dmac_disable_chan(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel));
-
-       spin_unlock_irqrestore(&prtd->lock, flags);
-
-       return 0;
-}
-
-static int s6000_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-       struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
-       struct s6000_pcm_dma_params *par;
-       int ret;
-
-       par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
-
-       ret = par->trigger(substream, cmd, 0);
-       if (ret < 0)
-               return ret;
-
-       switch (cmd) {
-       case SNDRV_PCM_TRIGGER_START:
-       case SNDRV_PCM_TRIGGER_RESUME:
-       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               ret = s6000_pcm_start(substream);
-               break;
-       case SNDRV_PCM_TRIGGER_STOP:
-       case SNDRV_PCM_TRIGGER_SUSPEND:
-       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               ret = s6000_pcm_stop(substream);
-               break;
-       default:
-               ret = -EINVAL;
-       }
-       if (ret < 0)
-               return ret;
-
-       return par->trigger(substream, cmd, 1);
-}
-
-static int s6000_pcm_prepare(struct snd_pcm_substream *substream)
-{
-       struct s6000_runtime_data *prtd = substream->runtime->private_data;
-
-       prtd->period = 0;
-
-       return 0;
-}
-
-static snd_pcm_uframes_t s6000_pcm_pointer(struct snd_pcm_substream *substream)
-{
-       struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
-       struct s6000_pcm_dma_params *par;
-       struct snd_pcm_runtime *runtime = substream->runtime;
-       struct s6000_runtime_data *prtd = runtime->private_data;
-       unsigned long flags;
-       unsigned int offset;
-       dma_addr_t count;
-
-       par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
-
-       spin_lock_irqsave(&prtd->lock, flags);
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               count = s6dmac_cur_src(DMA_MASK_DMAC(par->dma_out),
-                                      DMA_INDEX_CHNL(par->dma_out));
-       else
-               count = s6dmac_cur_dst(DMA_MASK_DMAC(par->dma_in),
-                                      DMA_INDEX_CHNL(par->dma_in));
-
-       count -= runtime->dma_addr;
-
-       spin_unlock_irqrestore(&prtd->lock, flags);
-
-       offset = bytes_to_frames(runtime, count);
-       if (unlikely(offset >= runtime->buffer_size))
-               offset = 0;
-
-       return offset;
-}
-
-static int s6000_pcm_open(struct snd_pcm_substream *substream)
-{
-       struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
-       struct s6000_pcm_dma_params *par;
-       struct snd_pcm_runtime *runtime = substream->runtime;
-       struct s6000_runtime_data *prtd;
-       int ret;
-
-       par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
-       snd_soc_set_runtime_hwparams(substream, &s6000_pcm_hardware);
-
-       ret = snd_pcm_hw_constraint_step(runtime, 0,
-                                        SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 16);
-       if (ret < 0)
-               return ret;
-       ret = snd_pcm_hw_constraint_step(runtime, 0,
-                                        SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
-       if (ret < 0)
-               return ret;
-       ret = snd_pcm_hw_constraint_integer(runtime,
-                                           SNDRV_PCM_HW_PARAM_PERIODS);
-       if (ret < 0)
-               return ret;
-
-       if (par->same_rate) {
-               int rate;
-               spin_lock(&par->lock); /* needed? */
-               rate = par->rate;
-               spin_unlock(&par->lock);
-               if (rate != -1) {
-                       ret = snd_pcm_hw_constraint_minmax(runtime,
-                                                       SNDRV_PCM_HW_PARAM_RATE,
-                                                       rate, rate);
-                       if (ret < 0)
-                               return ret;
-               }
-       }
-
-       prtd = kzalloc(sizeof(struct s6000_runtime_data), GFP_KERNEL);
-       if (prtd == NULL)
-               return -ENOMEM;
-
-       spin_lock_init(&prtd->lock);
-
-       runtime->private_data = prtd;
-
-       return 0;
-}
-
-static int s6000_pcm_close(struct snd_pcm_substream *substream)
-{
-       struct snd_pcm_runtime *runtime = substream->runtime;
-       struct s6000_runtime_data *prtd = runtime->private_data;
-
-       kfree(prtd);
-
-       return 0;
-}
-
-static int s6000_pcm_hw_params(struct snd_pcm_substream *substream,
-                                struct snd_pcm_hw_params *hw_params)
-{
-       struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
-       struct s6000_pcm_dma_params *par;
-       int ret;
-       ret = snd_pcm_lib_malloc_pages(substream,
-                                      params_buffer_bytes(hw_params));
-       if (ret < 0) {
-               printk(KERN_WARNING "s6000-pcm: allocation of memory failed\n");
-               return ret;
-       }
-
-       par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
-
-       if (par->same_rate) {
-               spin_lock(&par->lock);
-               if (par->rate == -1 ||
-                   !(par->in_use & ~(1 << substream->stream))) {
-                       par->rate = params_rate(hw_params);
-                       par->in_use |= 1 << substream->stream;
-               } else if (params_rate(hw_params) != par->rate) {
-                       snd_pcm_lib_free_pages(substream);
-                       par->in_use &= ~(1 << substream->stream);
-                       ret = -EBUSY;
-               }
-               spin_unlock(&par->lock);
-       }
-       return ret;
-}
-
-static int s6000_pcm_hw_free(struct snd_pcm_substream *substream)
-{
-       struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
-       struct s6000_pcm_dma_params *par =
-               snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
-
-       spin_lock(&par->lock);
-       par->in_use &= ~(1 << substream->stream);
-       if (!par->in_use)
-               par->rate = -1;
-       spin_unlock(&par->lock);
-
-       return snd_pcm_lib_free_pages(substream);
-}
-
-static struct snd_pcm_ops s6000_pcm_ops = {
-       .open =         s6000_pcm_open,
-       .close =        s6000_pcm_close,
-       .ioctl =        snd_pcm_lib_ioctl,
-       .hw_params =    s6000_pcm_hw_params,
-       .hw_free =      s6000_pcm_hw_free,
-       .trigger =      s6000_pcm_trigger,
-       .prepare =      s6000_pcm_prepare,
-       .pointer =      s6000_pcm_pointer,
-};
-
-static void s6000_pcm_free(struct snd_pcm *pcm)
-{
-       struct snd_soc_pcm_runtime *runtime = pcm->private_data;
-       struct s6000_pcm_dma_params *params =
-               snd_soc_dai_get_dma_data(runtime->cpu_dai,
-                       pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
-
-       free_irq(params->irq, pcm);
-       snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
-static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
-{
-       struct snd_card *card = runtime->card->snd_card;
-       struct snd_pcm *pcm = runtime->pcm;
-       struct s6000_pcm_dma_params *params;
-       int res;
-
-       params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
-                       pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
-
-       res = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
-       if (res)
-               return res;
-
-       if (params->dma_in) {
-               s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),
-                                   DMA_INDEX_CHNL(params->dma_in));
-               s6dmac_int_sources(DMA_MASK_DMAC(params->dma_in),
-                                  DMA_INDEX_CHNL(params->dma_in));
-       }
-
-       if (params->dma_out) {
-               s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_out),
-                                   DMA_INDEX_CHNL(params->dma_out));
-               s6dmac_int_sources(DMA_MASK_DMAC(params->dma_out),
-                                  DMA_INDEX_CHNL(params->dma_out));
-       }
-
-       res = request_irq(params->irq, s6000_pcm_irq, IRQF_SHARED,
-                         "s6000-audio", pcm);
-       if (res) {
-               printk(KERN_ERR "s6000-pcm couldn't get IRQ\n");
-               return res;
-       }
-
-       res = snd_pcm_lib_preallocate_pages_for_all(pcm,
-                                                   SNDRV_DMA_TYPE_DEV,
-                                                   card->dev,
-                                                   S6_PCM_PREALLOCATE_SIZE,
-                                                   S6_PCM_PREALLOCATE_MAX);
-       if (res)
-               printk(KERN_WARNING "s6000-pcm: preallocation failed\n");
-
-       spin_lock_init(&params->lock);
-       params->in_use = 0;
-       params->rate = -1;
-       return 0;
-}
-
-static struct snd_soc_platform_driver s6000_soc_platform = {
-       .ops =          &s6000_pcm_ops,
-       .pcm_new =      s6000_pcm_new,
-       .pcm_free =     s6000_pcm_free,
-};
-
-static int s6000_soc_platform_probe(struct platform_device *pdev)
-{
-       return snd_soc_register_platform(&pdev->dev, &s6000_soc_platform);
-}
-
-static int s6000_soc_platform_remove(struct platform_device *pdev)
-{
-       snd_soc_unregister_platform(&pdev->dev);
-       return 0;
-}
-
-static struct platform_driver s6000_pcm_driver = {
-       .driver = {
-                       .name = "s6000-pcm-audio",
-                       .owner = THIS_MODULE,
-       },
-
-       .probe = s6000_soc_platform_probe,
-       .remove = s6000_soc_platform_remove,
-};
-
-module_platform_driver(s6000_pcm_driver);
-
-MODULE_AUTHOR("Daniel Gloeckner");
-MODULE_DESCRIPTION("Stretch s6000 family PCM DMA module");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s6000/s6000-pcm.h b/sound/soc/s6000/s6000-pcm.h
deleted file mode 100644 (file)
index 09d9b88..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * ALSA PCM interface for the Stretch s6000 family
- *
- * Author:      Daniel Gloeckner, <dg@emlix.com>
- * Copyright:   (C) 2009 emlix GmbH <info@emlix.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _S6000_PCM_H
-#define _S6000_PCM_H
-
-struct snd_soc_dai;
-struct snd_pcm_substream;
-
-struct s6000_pcm_dma_params {
-       unsigned int (*check_xrun)(struct snd_soc_dai *cpu_dai);
-       int (*trigger)(struct snd_pcm_substream *substream, int cmd, int after);
-       dma_addr_t sif_in;
-       dma_addr_t sif_out;
-       u32 dma_in;
-       u32 dma_out;
-       int irq;
-       int same_rate;
-
-       spinlock_t lock;
-       int in_use;
-       int rate;
-};
-
-#endif
diff --git a/sound/soc/s6000/s6105-ipcam.c b/sound/soc/s6000/s6105-ipcam.c
deleted file mode 100644 (file)
index 3510c01..0000000
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * ASoC driver for Stretch s6105 IP camera platform
- *
- * Author:      Daniel Gloeckner, <dg@emlix.com>
- * Copyright:   (C) 2009 emlix GmbH <info@emlix.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-
-#include "s6000-pcm.h"
-#include "s6000-i2s.h"
-
-#define S6105_CAM_CODEC_CLOCK 12288000
-
-static int s6105_hw_params(struct snd_pcm_substream *substream,
-                          struct snd_pcm_hw_params *params)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *codec_dai = rtd->codec_dai;
-       struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-       int ret = 0;
-
-       /* set codec DAI configuration */
-       ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
-                                            SND_SOC_DAIFMT_CBM_CFM);
-       if (ret < 0)
-               return ret;
-
-       /* set cpu DAI configuration */
-       ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBM_CFM |
-                                          SND_SOC_DAIFMT_NB_NF);
-       if (ret < 0)
-               return ret;
-
-       /* set the codec system clock */
-       ret = snd_soc_dai_set_sysclk(codec_dai, 0, S6105_CAM_CODEC_CLOCK,
-                                           SND_SOC_CLOCK_OUT);
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-
-static struct snd_soc_ops s6105_ops = {
-       .hw_params = s6105_hw_params,
-};
-
-/* s6105 machine dapm widgets */
-static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = {
-       SND_SOC_DAPM_LINE("Audio Out Differential", NULL),
-       SND_SOC_DAPM_LINE("Audio Out Stereo", NULL),
-       SND_SOC_DAPM_LINE("Audio In", NULL),
-};
-
-/* s6105 machine audio map: connections to the codec pins */
-static const struct snd_soc_dapm_route audio_map[] = {
-       /* Audio Out connected to HPLOUT, HPLCOM, HPROUT */
-       {"Audio Out Differential", NULL, "HPLOUT"},
-       {"Audio Out Differential", NULL, "HPLCOM"},
-       {"Audio Out Stereo", NULL, "HPLOUT"},
-       {"Audio Out Stereo", NULL, "HPROUT"},
-
-       /* Audio In connected to LINE1L, LINE1R */
-       {"LINE1L", NULL, "Audio In"},
-       {"LINE1R", NULL, "Audio In"},
-};
-
-static int output_type_info(struct snd_kcontrol *kcontrol,
-                           struct snd_ctl_elem_info *uinfo)
-{
-       uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-       uinfo->count = 1;
-       uinfo->value.enumerated.items = 2;
-       if (uinfo->value.enumerated.item) {
-               uinfo->value.enumerated.item = 1;
-               strcpy(uinfo->value.enumerated.name, "HPLOUT/HPROUT");
-       } else {
-               strcpy(uinfo->value.enumerated.name, "HPLOUT/HPLCOM");
-       }
-       return 0;
-}
-
-static int output_type_get(struct snd_kcontrol *kcontrol,
-                          struct snd_ctl_elem_value *ucontrol)
-{
-       ucontrol->value.enumerated.item[0] = kcontrol->private_value;
-       return 0;
-}
-
-static int output_type_put(struct snd_kcontrol *kcontrol,
-                          struct snd_ctl_elem_value *ucontrol)
-{
-       struct snd_soc_card *card = kcontrol->private_data;
-       struct snd_soc_dapm_context *dapm = &card->dapm;
-       unsigned int val = (ucontrol->value.enumerated.item[0] != 0);
-       char *differential = "Audio Out Differential";
-       char *stereo = "Audio Out Stereo";
-
-       if (kcontrol->private_value == val)
-               return 0;
-       kcontrol->private_value = val;
-       snd_soc_dapm_disable_pin(dapm, val ? differential : stereo);
-       snd_soc_dapm_sync(dapm);
-       snd_soc_dapm_enable_pin(dapm, val ? stereo : differential);
-       snd_soc_dapm_sync(dapm);
-
-       return 1;
-}
-
-static const struct snd_kcontrol_new audio_out_mux = {
-       .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-       .name = "Master Output Mux",
-       .index = 0,
-       .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
-       .info = output_type_info,
-       .get = output_type_get,
-       .put = output_type_put,
-       .private_value = 1 /* default to stereo */
-};
-
-/* Logic for a aic3x as connected on the s6105 ip camera ref design */
-static int s6105_aic3x_init(struct snd_soc_pcm_runtime *rtd)
-{
-       struct snd_soc_card *card = rtd->card;
-
-       /* must correspond to audio_out_mux.private_value initializer */
-       snd_soc_dapm_disable_pin(&card->dapm, "Audio Out Differential");
-
-       snd_ctl_add(card->snd_card, snd_ctl_new1(&audio_out_mux, card));
-
-       return 0;
-}
-
-/* s6105 digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link s6105_dai = {
-       .name = "TLV320AIC31",
-       .stream_name = "AIC31",
-       .cpu_dai_name = "s6000-i2s",
-       .codec_dai_name = "tlv320aic3x-hifi",
-       .platform_name = "s6000-pcm-audio",
-       .codec_name = "tlv320aic3x-codec.0-001a",
-       .init = s6105_aic3x_init,
-       .ops = &s6105_ops,
-};
-
-/* s6105 audio machine driver */
-static struct snd_soc_card snd_soc_card_s6105 = {
-       .name = "Stretch IP Camera",
-       .owner = THIS_MODULE,
-       .dai_link = &s6105_dai,
-       .num_links = 1,
-
-       .dapm_widgets = aic3x_dapm_widgets,
-       .num_dapm_widgets = ARRAY_SIZE(aic3x_dapm_widgets),
-       .dapm_routes = audio_map,
-       .num_dapm_routes = ARRAY_SIZE(audio_map),
-       .fully_routed = true,
-};
-
-static struct s6000_snd_platform_data s6105_snd_data __initdata = {
-       .wide           = 0,
-       .channel_in     = 0,
-       .channel_out    = 1,
-       .lines_in       = 1,
-       .lines_out      = 1,
-       .same_rate      = 1,
-};
-
-static struct platform_device *s6105_snd_device;
-
-/* temporary i2c device creation until this can be moved into the machine
- * support file.
-*/
-static struct i2c_board_info i2c_device[] = {
-       { I2C_BOARD_INFO("tlv320aic33", 0x18), }
-};
-
-static int __init s6105_init(void)
-{
-       int ret;
-
-       i2c_register_board_info(0, i2c_device, ARRAY_SIZE(i2c_device));
-
-       s6105_snd_device = platform_device_alloc("soc-audio", -1);
-       if (!s6105_snd_device)
-               return -ENOMEM;
-
-       platform_set_drvdata(s6105_snd_device, &snd_soc_card_s6105);
-       platform_device_add_data(s6105_snd_device, &s6105_snd_data,
-                                sizeof(s6105_snd_data));
-
-       ret = platform_device_add(s6105_snd_device);
-       if (ret)
-               platform_device_put(s6105_snd_device);
-
-       return ret;
-}
-
-static void __exit s6105_exit(void)
-{
-       platform_device_unregister(s6105_snd_device);
-}
-
-module_init(s6105_init);
-module_exit(s6105_exit);
-
-MODULE_AUTHOR("Daniel Gloeckner");
-MODULE_DESCRIPTION("Stretch s6105 IP camera ASoC driver");
-MODULE_LICENSE("GPL");
index 8c5c11ca8c539d5292d9ceb3524754e4e06ead49..25114c9a6801d65d822e6f6e082e6e70b1144aab 100644 (file)
@@ -1142,6 +1142,11 @@ static int data_init(int argc, const char **argv)
 
 int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
 {
+       int ret = hists__init();
+
+       if (ret < 0)
+               return ret;
+
        perf_config(perf_default_config, NULL);
 
        argc = parse_options(argc, argv, options, diff_usage, 0);
index 04412b4770a2230ec83296cb179eb397ebb42e94..7af26acf06d9d573e42c1326757af673ce780c50 100644 (file)
@@ -375,7 +375,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_CALLBACK('x', "exec", NULL, "executable|path",
                        "target executable name or path", opt_set_target),
        OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
-                   "Disable symbol demangling"),
+                   "Enable symbol demangling"),
        OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
                    "Enable kernel symbol demangling"),
        OPT_END()
index 937e4324ad94ed601bb43a2d12489c2f90eb841b..a3b13d7dc1d43f3caf301b4d9f941c60d88ed37c 100644 (file)
@@ -13,7 +13,7 @@
 #define wmb()          asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define rmb()          asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define cpu_relax()    asm volatile("rep; nop" ::: "memory");
-#define CPUINFO_PROC   "model name"
+#define CPUINFO_PROC   {"model name"}
 #ifndef __NR_perf_event_open
 # define __NR_perf_event_open 336
 #endif
@@ -30,7 +30,7 @@
 #define wmb()          asm volatile("sfence" ::: "memory")
 #define rmb()          asm volatile("lfence" ::: "memory")
 #define cpu_relax()    asm volatile("rep; nop" ::: "memory");
-#define CPUINFO_PROC   "model name"
+#define CPUINFO_PROC   {"model name"}
 #ifndef __NR_perf_event_open
 # define __NR_perf_event_open 298
 #endif
 #define mb()           asm volatile ("sync" ::: "memory")
 #define wmb()          asm volatile ("sync" ::: "memory")
 #define rmb()          asm volatile ("sync" ::: "memory")
-#define CPUINFO_PROC   "cpu"
+#define CPUINFO_PROC   {"cpu"}
 #endif
 
 #ifdef __s390__
 #define mb()           asm volatile("bcr 15,0" ::: "memory")
 #define wmb()          asm volatile("bcr 15,0" ::: "memory")
 #define rmb()          asm volatile("bcr 15,0" ::: "memory")
-#define CPUINFO_PROC   "vendor_id"
+#define CPUINFO_PROC   {"vendor_id"}
 #endif
 
 #ifdef __sh__
 # define wmb()         asm volatile("" ::: "memory")
 # define rmb()         asm volatile("" ::: "memory")
 #endif
-#define CPUINFO_PROC   "cpu type"
+#define CPUINFO_PROC   {"cpu type"}
 #endif
 
 #ifdef __hppa__
 #define mb()           asm volatile("" ::: "memory")
 #define wmb()          asm volatile("" ::: "memory")
 #define rmb()          asm volatile("" ::: "memory")
-#define CPUINFO_PROC   "cpu"
+#define CPUINFO_PROC   {"cpu"}
 #endif
 
 #ifdef __sparc__
 #endif
 #define wmb()          asm volatile("":::"memory")
 #define rmb()          asm volatile("":::"memory")
-#define CPUINFO_PROC   "cpu"
+#define CPUINFO_PROC   {"cpu"}
 #endif
 
 #ifdef __alpha__
 #define mb()           asm volatile("mb" ::: "memory")
 #define wmb()          asm volatile("wmb" ::: "memory")
 #define rmb()          asm volatile("mb" ::: "memory")
-#define CPUINFO_PROC   "cpu model"
+#define CPUINFO_PROC   {"cpu model"}
 #endif
 
 #ifdef __ia64__
 #define wmb()          asm volatile ("mf" ::: "memory")
 #define rmb()          asm volatile ("mf" ::: "memory")
 #define cpu_relax()    asm volatile ("hint @pause" ::: "memory")
-#define CPUINFO_PROC   "model name"
+#define CPUINFO_PROC   {"model name"}
 #endif
 
 #ifdef __arm__
 #define mb()           ((void(*)(void))0xffff0fa0)()
 #define wmb()          ((void(*)(void))0xffff0fa0)()
 #define rmb()          ((void(*)(void))0xffff0fa0)()
-#define CPUINFO_PROC   "Processor"
+#define CPUINFO_PROC   {"model name", "Processor"}
 #endif
 
 #ifdef __aarch64__
                                : "memory")
 #define wmb()  mb()
 #define rmb()  mb()
-#define CPUINFO_PROC   "cpu model"
+#define CPUINFO_PROC   {"cpu model"}
 #endif
 
 #ifdef __arc__
 #define mb()           asm volatile("" ::: "memory")
 #define wmb()          asm volatile("" ::: "memory")
 #define rmb()          asm volatile("" ::: "memory")
-#define CPUINFO_PROC   "Processor"
+#define CPUINFO_PROC   {"Processor"}
 #endif
 
 #ifdef __metag__
 #define mb()           asm volatile("" ::: "memory")
 #define wmb()          asm volatile("" ::: "memory")
 #define rmb()          asm volatile("" ::: "memory")
-#define CPUINFO_PROC   "CPU"
+#define CPUINFO_PROC   {"CPU"}
 #endif
 
 #ifdef __xtensa__
 #define mb()           asm volatile("memw" ::: "memory")
 #define wmb()          asm volatile("memw" ::: "memory")
 #define rmb()          asm volatile("" ::: "memory")
-#define CPUINFO_PROC   "core ID"
+#define CPUINFO_PROC   {"core ID"}
 #endif
 
 #ifdef __tile__
 #define wmb()          asm volatile ("mf" ::: "memory")
 #define rmb()          asm volatile ("mf" ::: "memory")
 #define cpu_relax()    asm volatile ("mfspr zero, PASS" ::: "memory")
-#define CPUINFO_PROC    "model name"
+#define CPUINFO_PROC    {"model name"}
 #endif
 
 #define barrier() asm volatile ("" ::: "memory")
index ce0de00399da381accf00472f66c6064cdcee20a..26f5b2fe5dc89832c44eb8c20b5deb6db1c72d3e 100644 (file)
@@ -579,16 +579,12 @@ static int write_version(int fd, struct perf_header *h __maybe_unused,
        return do_write_string(fd, perf_version_string);
 }
 
-static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
-                      struct perf_evlist *evlist __maybe_unused)
+static int __write_cpudesc(int fd, const char *cpuinfo_proc)
 {
-#ifndef CPUINFO_PROC
-#define CPUINFO_PROC NULL
-#endif
        FILE *file;
        char *buf = NULL;
        char *s, *p;
-       const char *search = CPUINFO_PROC;
+       const char *search = cpuinfo_proc;
        size_t len = 0;
        int ret = -1;
 
@@ -638,6 +634,25 @@ done:
        return ret;
 }
 
+static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
+                      struct perf_evlist *evlist __maybe_unused)
+{
+#ifndef CPUINFO_PROC
+#define CPUINFO_PROC {"model name", }
+#endif
+       const char *cpuinfo_procs[] = CPUINFO_PROC;
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
+               int ret;
+               ret = __write_cpudesc(fd, cpuinfo_procs[i]);
+               if (ret >= 0)
+                       return ret;
+       }
+       return -1;
+}
+
+
 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
                        struct perf_evlist *evlist __maybe_unused)
 {
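
For reference, a minimal sketch of how a brace-enclosed CPUINFO_PROC list is consumed, mirroring the write_cpudesc()/__write_cpudesc() split above. This is not part of the patch; N_ELEMS and line_has_cpudesc are illustrative names.

/* Build: cc -Wall sketch.c && ./a.out */
#include <stdio.h>
#include <string.h>

#define CPUINFO_PROC    {"model name", "Processor"}     /* e.g. the __arm__ case */
#define N_ELEMS(a)      (sizeof(a) / sizeof((a)[0]))

static int line_has_cpudesc(const char *line)
{
        const char *keys[] = CPUINFO_PROC;
        unsigned int i;

        /* Try each /proc/cpuinfo key in turn; the first match wins. */
        for (i = 0; i < N_ELEMS(keys); i++)
                if (!strncmp(line, keys[i], strlen(keys[i])))
                        return 1;
        return 0;
}

int main(void)
{
        printf("%d\n", line_has_cpudesc("model name\t: ARMv7 Processor rev 3"));
        return 0;
}
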
index 4906cd81cb56a852843b38e85a1d2f5b82417654..9402885a77f383674e40d54e1a543da111dc6725 100644 (file)
@@ -373,6 +373,9 @@ struct sort_entry sort_cpu = {
 static int64_t
 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
 {
+       if (!left->branch_info || !right->branch_info)
+               return cmp_null(left->branch_info, right->branch_info);
+
        return _sort__dso_cmp(left->branch_info->from.map,
                              right->branch_info->from.map);
 }
@@ -380,13 +383,19 @@ sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
                                    size_t size, unsigned int width)
 {
-       return _hist_entry__dso_snprintf(he->branch_info->from.map,
-                                        bf, size, width);
+       if (he->branch_info)
+               return _hist_entry__dso_snprintf(he->branch_info->from.map,
+                                                bf, size, width);
+       else
+               return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
 }
 
 static int64_t
 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
 {
+       if (!left->branch_info || !right->branch_info)
+               return cmp_null(left->branch_info, right->branch_info);
+
        return _sort__dso_cmp(left->branch_info->to.map,
                              right->branch_info->to.map);
 }
@@ -394,8 +403,11 @@ sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
                                       size_t size, unsigned int width)
 {
-       return _hist_entry__dso_snprintf(he->branch_info->to.map,
-                                        bf, size, width);
+       if (he->branch_info)
+               return _hist_entry__dso_snprintf(he->branch_info->to.map,
+                                                bf, size, width);
+       else
+               return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
 }
 
 static int64_t
@@ -404,6 +416,12 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
        struct addr_map_symbol *from_l = &left->branch_info->from;
        struct addr_map_symbol *from_r = &right->branch_info->from;
 
+       if (!left->branch_info || !right->branch_info)
+               return cmp_null(left->branch_info, right->branch_info);
+
+       from_l = &left->branch_info->from;
+       from_r = &right->branch_info->from;
+
        if (!from_l->sym && !from_r->sym)
                return _sort__addr_cmp(from_l->addr, from_r->addr);
 
@@ -413,8 +431,13 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
 static int64_t
 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
 {
-       struct addr_map_symbol *to_l = &left->branch_info->to;
-       struct addr_map_symbol *to_r = &right->branch_info->to;
+       struct addr_map_symbol *to_l, *to_r;
+
+       if (!left->branch_info || !right->branch_info)
+               return cmp_null(left->branch_info, right->branch_info);
+
+       to_l = &left->branch_info->to;
+       to_r = &right->branch_info->to;
 
        if (!to_l->sym && !to_r->sym)
                return _sort__addr_cmp(to_l->addr, to_r->addr);
@@ -425,19 +448,27 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
                                         size_t size, unsigned int width)
 {
-       struct addr_map_symbol *from = &he->branch_info->from;
-       return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
-                                        he->level, bf, size, width);
+       if (he->branch_info) {
+               struct addr_map_symbol *from = &he->branch_info->from;
 
+               return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
+                                                he->level, bf, size, width);
+       }
+
+       return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
 }
 
 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
                                       size_t size, unsigned int width)
 {
-       struct addr_map_symbol *to = &he->branch_info->to;
-       return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
-                                        he->level, bf, size, width);
+       if (he->branch_info) {
+               struct addr_map_symbol *to = &he->branch_info->to;
 
+               return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
+                                                he->level, bf, size, width);
+       }
+
+       return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
 }
 
 struct sort_entry sort_dso_from = {
@@ -471,11 +502,13 @@ struct sort_entry sort_sym_to = {
 static int64_t
 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
 {
-       const unsigned char mp = left->branch_info->flags.mispred !=
-                                       right->branch_info->flags.mispred;
-       const unsigned char p = left->branch_info->flags.predicted !=
-                                       right->branch_info->flags.predicted;
+       unsigned char mp, p;
+
+       if (!left->branch_info || !right->branch_info)
+               return cmp_null(left->branch_info, right->branch_info);
 
+       mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
+       p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
        return mp || p;
 }
 
@@ -483,10 +516,12 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
                                    size_t size, unsigned int width){
        static const char *out = "N/A";
 
-       if (he->branch_info->flags.predicted)
-               out = "N";
-       else if (he->branch_info->flags.mispred)
-               out = "Y";
+       if (he->branch_info) {
+               if (he->branch_info->flags.predicted)
+                       out = "N";
+               else if (he->branch_info->flags.mispred)
+                       out = "Y";
+       }
 
        return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
 }
@@ -989,6 +1024,9 @@ struct sort_entry sort_mem_dcacheline = {
 static int64_t
 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
 {
+       if (!left->branch_info || !right->branch_info)
+               return cmp_null(left->branch_info, right->branch_info);
+
        return left->branch_info->flags.abort !=
                right->branch_info->flags.abort;
 }
@@ -996,10 +1034,15 @@ sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
                                    size_t size, unsigned int width)
 {
-       static const char *out = ".";
+       static const char *out = "N/A";
+
+       if (he->branch_info) {
+               if (he->branch_info->flags.abort)
+                       out = "A";
+               else
+                       out = ".";
+       }
 
-       if (he->branch_info->flags.abort)
-               out = "A";
        return repsep_snprintf(bf, size, "%-*s", width, out);
 }
 
@@ -1013,6 +1056,9 @@ struct sort_entry sort_abort = {
 static int64_t
 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
 {
+       if (!left->branch_info || !right->branch_info)
+               return cmp_null(left->branch_info, right->branch_info);
+
        return left->branch_info->flags.in_tx !=
                right->branch_info->flags.in_tx;
 }
@@ -1020,10 +1066,14 @@ sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
                                    size_t size, unsigned int width)
 {
-       static const char *out = ".";
+       static const char *out = "N/A";
 
-       if (he->branch_info->flags.in_tx)
-               out = "T";
+       if (he->branch_info) {
+               if (he->branch_info->flags.in_tx)
+                       out = "T";
+               else
+                       out = ".";
+       }
 
        return repsep_snprintf(bf, size, "%-*s", width, out);
 }
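
The sort.c hunks above guard every branch_info dereference and fall back to cmp_null(). Below is a sketch of the null-aware comparison convention they rely on; it is an assumption about the existing perf helper, with an illustrative name, not a copy of it.

#include <stdint.h>
#include <stdio.h>

/* Order hist entries that lack branch_info consistently instead of
 * dereferencing a NULL pointer. */
static int64_t cmp_null_sketch(const void *l, const void *r)
{
        if (!l && !r)
                return 0;       /* both missing: treat as equal */
        if (!l)
                return -1;      /* only the left one missing */
        return 1;               /* only the right one missing */
}

int main(void)
{
        int x = 1;

        printf("%lld\n", (long long)cmp_null_sketch(NULL, &x));
        return 0;
}
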
index 2b7b2d91c016b211f48a9823f9837033615b1136..c41411726c7a1052a704f75d829e433f3abdb6b8 100644 (file)
@@ -117,6 +117,9 @@ int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
                if (!new)
                        return -ENOMEM;
                list_add(&new->list, &thread->comm_list);
+
+               if (exec)
+                       unwind__flush_access(thread);
        }
 
        thread->comm_set = true;
index e060386165c5fad82372123e99252f577eb7eac4..4d45c0dfe34347ef57527dd90f132e9067eb57e9 100644 (file)
@@ -539,11 +539,23 @@ int unwind__prepare_access(struct thread *thread)
                return -ENOMEM;
        }
 
+       unw_set_caching_policy(addr_space, UNW_CACHE_GLOBAL);
        thread__set_priv(thread, addr_space);
 
        return 0;
 }
 
+void unwind__flush_access(struct thread *thread)
+{
+       unw_addr_space_t addr_space;
+
+       if (callchain_param.record_mode != CALLCHAIN_DWARF)
+               return;
+
+       addr_space = thread__priv(thread);
+       unw_flush_cache(addr_space, 0, 0);
+}
+
 void unwind__finish_access(struct thread *thread)
 {
        unw_addr_space_t addr_space;
index c17c4855bdbc3e4bcf8423246c92b8f7bc750494..f50b737235eb82eaabb44778c33cb4646f091459 100644 (file)
@@ -23,6 +23,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 #ifdef HAVE_LIBUNWIND_SUPPORT
 int libunwind__arch_reg_id(int regnum);
 int unwind__prepare_access(struct thread *thread);
+void unwind__flush_access(struct thread *thread);
 void unwind__finish_access(struct thread *thread);
 #else
 static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
@@ -30,6 +31,7 @@ static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
        return 0;
 }
 
+static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
 static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
 #endif
 #else
@@ -49,6 +51,7 @@ static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
        return 0;
 }
 
+static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
 static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
 #endif /* HAVE_DWARF_UNWIND_SUPPORT */
 #endif /* __UNWIND_H */
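
The unwind.h hunk above uses the usual compile-time stub idiom: a real prototype when the feature is built in, an empty static inline otherwise, so call sites such as the thread.c change stay free of #ifdefs. A minimal sketch with illustrative names (not perf's):

struct thread;                                  /* opaque to callers */

#ifdef HAVE_FANCY_UNWIND
void fancy_flush(struct thread *t);             /* real implementation elsewhere */
#else
static inline void fancy_flush(struct thread *t) { (void)t; }   /* no-op stub */
#endif

Callers invoke fancy_flush() unconditionally, and the no-op variant compiles away when the feature is disabled.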