Merge tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 5 Jul 2012 20:20:02 +0000 (13:20 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 5 Jul 2012 20:20:02 +0000 (13:20 -0700)
Pull ARM SoC fixes from Arnd Bergmann:
 "Small fixes on multiple ARM platforms
   - A build regression from a previous fix on dove and mv78xx0
   - Two fixes for recently (3.5-rc1) changed mmp/pxa code
   - multiple omap2+ bug fixes
   - two trivial fixes for i.MX
   - one v3.5 regression for mxs"

* tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc:
  ARM: apx4devkit: fix FEC enabling PHY clock
  ARM: OMAP2+: hwmod data: Fix wrong McBSP clock alias on OMAP4
  ARM: OMAP4: hwmod data: temporarily comment out data for the usb_host_fs and aess IP blocks
  ARM: Orion: Fix WDT compile for Dove and MV78xx0
  ARM: mmp: remove mach/gpio-pxa.h
  ARM: imx: assert SCC gate stays enabled
  ARM: OMAP4: TWL6030: ensure sys_nirq1 is mux'd and wakeup enabled
  ARM: OMAP2: Overo: init I2C before MMC to fix MMC suspend/resume failure
  ARM: imx27_visstrim_m10: Do not include <asm/system.h>
  ARM: pxa: hx4700: Fix basic suspend/resume

114 files changed:
Documentation/ABI/testing/sysfs-block-rssd
Documentation/device-mapper/verity.txt
Documentation/devicetree/bindings/input/fsl-mma8450.txt
Documentation/devicetree/bindings/mfd/mc13xxx.txt
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
Documentation/devicetree/bindings/net/fsl-fec.txt
Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/prctl/no_new_privs.txt [new file with mode: 0644]
Documentation/virtual/kvm/api.txt
MAINTAINERS
arch/arm/kernel/vmlinux.lds.S
arch/arm/mm/mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/xmon/xmon.c
arch/x86/kvm/mmu.c
block/blk-cgroup.c
block/blk-core.c
block/blk-timeout.c
block/cfq-iosched.c
block/scsi_ioctl.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_req.c
drivers/block/floppy.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/umem.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkfront.c
drivers/clk/clk.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/si.c
drivers/leds/ledtrig-heartbeat.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/persistent-data/dm-space-map-checker.c
drivers/md/persistent-data/dm-space-map-disk.c
drivers/md/persistent-data/dm-transaction-manager.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/usb/qmi_wwan.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/key.c
drivers/net/wireless/iwlwifi/iwl-mac80211.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/11n_rxreorder.h
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/ti/wlcore/Kconfig
drivers/of/base.c
drivers/of/platform.c
fs/btrfs/backref.c
fs/btrfs/ctree.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.h
fs/btrfs/super.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/splice.c
include/linux/blkdev.h
include/linux/kvm_host.h
include/linux/splice.h
include/net/sctp/structs.h
include/net/sctp/tsnmap.h
kernel/relay.c
kernel/trace/trace.c
mm/shmem.c
net/core/dev.c
net/core/skbuff.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nfnetlink.c
net/nfc/nci/ntf.c
net/nfc/rawsock.c
net/sctp/associola.c
net/sctp/output.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/transport.c
net/sctp/tsnmap.c
net/sctp/ulpevent.c
net/sctp/ulpqueue.c
security/security.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/tlv320aic3x.h
sound/soc/codecs/wm2200.c
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

index 679ce354312281846e4ef2decee0689b0775c985..beef30c046b0d3181bfc353d15704bb4bdaf3da6 100644 (file)
@@ -1,26 +1,5 @@
-What:           /sys/block/rssd*/registers
-Date:           March 2012
-KernelVersion:  3.3
-Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:    This is a read-only file. Dumps below driver information and
-                hardware registers.
-                    - S ACTive
-                    - Command Issue
-                    - Completed
-                    - PORT IRQ STAT
-                    - HOST IRQ STAT
-                    - Allocated
-                    - Commands in Q
-
 What:           /sys/block/rssd*/status
 Date:           April 2012
 KernelVersion:  3.4
 Contact:        Asai Thambi S P <asamymuthupa@micron.com>
 Description:    This is a read-only file. Indicates the status of the device.
-
-What:           /sys/block/rssd*/flags
-Date:           May 2012
-KernelVersion:  3.5
-Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:    This is a read-only file. Dumps the flags in port and driver
-                data structure
index 32e48797a14f80de6ebff7a441e489021bfce58a..9884681535ee36bf03a7d7baaa54abb36360d53d 100644 (file)
@@ -7,39 +7,39 @@ This target is read-only.
 
 Construction Parameters
 =======================
-    <version> <dev> <hash_dev> <hash_start>
+    <version> <dev> <hash_dev>
     <data_block_size> <hash_block_size>
     <num_data_blocks> <hash_start_block>
     <algorithm> <digest> <salt>
 
 <version>
-    This is the version number of the on-disk format.
+    This is the type of the on-disk hash format.
 
     0 is the original format used in the Chromium OS.
-       The salt is appended when hashing, digests are stored continuously and
-       the rest of the block is padded with zeros.
+      The salt is appended when hashing, digests are stored continuously and
+      the rest of the block is padded with zeros.
 
     1 is the current format that should be used for new devices.
-       The salt is prepended when hashing and each digest is
-       padded with zeros to the power of two.
+      The salt is prepended when hashing and each digest is
+      padded with zeros to the power of two.
 
 <dev>
-    This is the device containing the data the integrity of which needs to be
+    This is the device containing data, the integrity of which needs to be
     checked.  It may be specified as a path, like /dev/sdaX, or a device number,
     <major>:<minor>.
 
 <hash_dev>
-    This is the device that that supplies the hash tree data.  It may be
+    This is the device that supplies the hash tree data.  It may be
     specified similarly to the device path and may be the same device.  If the
-    same device is used, the hash_start should be outside of the dm-verity
-    configured device size.
+    same device is used, the hash_start should be outside the configured
+    dm-verity device.
 
 <data_block_size>
-    The block size on a data device.  Each block corresponds to one digest on
-    the hash device.
+    The block size on a data device in bytes.
+    Each block corresponds to one digest on the hash device.
 
 <hash_block_size>
-    The size of a hash block.
+    The size of a hash block in bytes.
 
 <num_data_blocks>
     The number of data blocks on the data device.  Additional blocks are
@@ -65,7 +65,7 @@ Construction Parameters
 Theory of operation
 ===================
 
-dm-verity is meant to be setup as part of a verified boot path.  This
+dm-verity is meant to be set up as part of a verified boot path.  This
 may be anything ranging from a boot using tboot or trustedgrub to just
 booting from a known-good device (like a USB drive or CD).
 
@@ -73,20 +73,20 @@ When a dm-verity device is configured, it is expected that the caller
 has been authenticated in some way (cryptographic signatures, etc).
 After instantiation, all hashes will be verified on-demand during
 disk access.  If they cannot be verified up to the root node of the
-tree, the root hash, then the I/O will fail.  This should identify
+tree, the root hash, then the I/O will fail.  This should detect
 tampering with any data on the device and the hash data.
 
 Cryptographic hashes are used to assert the integrity of the device on a
-per-block basis.  This allows for a lightweight hash computation on first read
-into the page cache.  Block hashes are stored linearly-aligned to the nearest
-block the size of a page.
+per-block basis. This allows for a lightweight hash computation on first read
+into the page cache. Block hashes are stored linearly, aligned to the nearest
+block size.
 
 Hash Tree
 ---------
 
 Each node in the tree is a cryptographic hash.  If it is a leaf node, the hash
-is of some block data on disk.  If it is an intermediary node, then the hash is
-of a number of child nodes.
+of some data block on disk is calculated. If it is an intermediary node,
+the hash of a number of child nodes is calculated.
 
 Each entry in the tree is a collection of neighboring nodes that fit in one
 block.  The number is determined based on block_size and the size of the
@@ -110,63 +110,23 @@ alg = sha256, num_blocks = 32768, block_size = 4096
 On-disk format
 ==============
 
-Below is the recommended on-disk format. The verity kernel code does not
-read the on-disk header. It only reads the hash blocks which directly
-follow the header. It is expected that a user-space tool will verify the
-integrity of the verity_header and then call dmsetup with the correct
-parameters. Alternatively, the header can be omitted and the dmsetup
-parameters can be passed via the kernel command-line in a rooted chain
-of trust where the command-line is verified.
+The verity kernel code does not read the verity metadata on-disk header.
+It only reads the hash blocks which directly follow the header.
+It is expected that a user-space tool will verify the integrity of the
+verity header.
 
-The on-disk format is especially useful in cases where the hash blocks
-are on a separate partition. The magic number allows easy identification
-of the partition contents. Alternatively, the hash blocks can be stored
-in the same partition as the data to be verified. In such a configuration
-the filesystem on the partition would be sized a little smaller than
-the full-partition, leaving room for the hash blocks.
-
-struct superblock {
-       uint8_t signature[8]
-               "verity\0\0";
-
-       uint8_t version;
-               1 - current format
-
-       uint8_t data_block_bits;
-               log2(data block size)
-
-       uint8_t hash_block_bits;
-               log2(hash block size)
-
-       uint8_t pad1[1];
-               zero padding
-
-       uint16_t salt_size;
-               big-endian salt size
-
-       uint8_t pad2[2];
-               zero padding
-
-       uint32_t data_blocks_hi;
-               big-endian high 32 bits of the 64-bit number of data blocks
-
-       uint32_t data_blocks_lo;
-               big-endian low 32 bits of the 64-bit number of data blocks
-
-       uint8_t algorithm[16];
-               cryptographic algorithm
-
-       uint8_t salt[384];
-               salt (the salt size is specified above)
-
-       uint8_t pad3[88];
-               zero padding to 512-byte boundary
-}
+Alternatively, the header can be omitted and the dmsetup parameters can
+be passed via the kernel command-line in a rooted chain of trust where
+the command-line is verified.
 
 Directly following the header (and with sector number padded to the next hash
 block boundary) are the hash blocks which are stored a depth at a time
 (starting from the root), sorted in order of increasing index.
 
+The full specification of kernel parameters and on-disk metadata format
+is available at the cryptsetup project's wiki page
+  http://code.google.com/p/cryptsetup/wiki/DMVerity
+
 Status
 ======
 V (for Valid) is returned if every check performed so far was valid.
@@ -174,21 +134,22 @@ If any check failed, C (for Corruption) is returned.
 
 Example
 =======
-
-Setup a device:
-  dmsetup create vroot --table \
-    "0 2097152 "\
-    "verity 1 /dev/sda1 /dev/sda2 4096 4096 2097152 1 "\
+Set up a device:
+  # dmsetup create vroot --readonly --table \
+    "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 "\
     "4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076 "\
     "1234000000000000000000000000000000000000000000000000000000000000"
 
 A command line tool veritysetup is available to compute or verify
-the hash tree or activate the kernel driver.  This is available from
-the LVM2 upstream repository and may be supplied as a package called
-device-mapper-verity-tools:
-    git://sources.redhat.com/git/lvm2
-    http://sourceware.org/git/?p=lvm2.git
-    http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/verity?cvsroot=lvm2
-
-veritysetup -a vroot /dev/sda1 /dev/sda2 \
-       4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+the hash tree or activate the kernel device. This is available from
+the cryptsetup upstream repository http://code.google.com/p/cryptsetup/
+(as a libcryptsetup extension).
+
+Create hash on the device:
+  # veritysetup format /dev/sda1 /dev/sda2
+  ...
+  Root hash: 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+
+Activate the device:
+  # veritysetup create vroot /dev/sda1 /dev/sda2 \
+    4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
index a00c94ccbdeeb871c440b770de814d11c6f757d7..0b96e5737d3a569ca4acfec137cc6870d1b56880 100644 (file)
@@ -2,6 +2,7 @@
 
 Required properties:
 - compatible : "fsl,mma8450".
+- reg: the I2C address of MMA8450
 
 Example:
 
index 19f6af47a792986c23a9ed033e15e8639ec5c51b..baf07987ae6863c68b39efa5ba1227a327009337 100644 (file)
@@ -46,8 +46,8 @@ Examples:
 
 ecspi@70010000 { /* ECSPI1 */
        fsl,spi-num-chipselects = <2>;
-       cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
-                  <&gpio3 25 0>; /* GPIO4_25 */
+       cs-gpios = <&gpio4 24 0>, /* GPIO4_24 */
+                  <&gpio4 25 0>; /* GPIO4_25 */
        status = "okay";
 
        pmic: mc13892@0 {
index c7e404b3ef0515b5527583cae877ab48c5d69595..fea541ee8b34a18fbf1aaef9276c17ef992db830 100644 (file)
@@ -29,6 +29,6 @@ esdhc@70008000 {
        compatible = "fsl,imx51-esdhc";
        reg = <0x70008000 0x4000>;
        interrupts = <2>;
-       cd-gpios = <&gpio0 6 0>; /* GPIO1_6 */
-       wp-gpios = <&gpio0 5 0>; /* GPIO1_5 */
+       cd-gpios = <&gpio1 6 0>; /* GPIO1_6 */
+       wp-gpios = <&gpio1 5 0>; /* GPIO1_5 */
 };
index 7ab9e1a2d8bec19fac2283b5703fae60d2858998..4616fc28ee86e83793550a0ceda9fa25e4b46167 100644 (file)
@@ -19,6 +19,6 @@ ethernet@83fec000 {
        reg = <0x83fec000 0x4000>;
        interrupts = <87>;
        phy-mode = "mii";
-       phy-reset-gpios = <&gpio1 14 0>; /* GPIO2_14 */
+       phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
        local-mac-address = [00 04 9F 01 1B B9];
 };
index 9841057d112bb6e4c46299166ef7c5a9a6da5521..4256a6df9b79355d8c17f5f393c3956d6a5a78ca 100644 (file)
@@ -17,6 +17,6 @@ ecspi@70010000 {
        reg = <0x70010000 0x4000>;
        interrupts = <36>;
        fsl,spi-num-chipselects = <2>;
-       cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
-                  <&gpio3 25 0>; /* GPIO4_25 */
+       cs-gpios = <&gpio3 24 0>, /* GPIO3_24 */
+                  <&gpio3 25 0>; /* GPIO3_25 */
 };
index 6eab91747a86a03c45c60d9bdca74fb28215b3a3..db4d3af3643c407ffad6e4ead2dd81d8c4ab36cf 100644 (file)
@@ -3,6 +3,7 @@ Device tree binding vendor prefix registry.  Keep list in alphabetical order.
 This isn't an exhaustive list, but you should add new prefixes to it before
 using them to avoid name-space collisions.
 
+ad     Avionic Design GmbH
 adi    Analog Devices, Inc.
 amcc   Applied Micro Circuits Corporation (APM, formerly AMCC)
 apm    Applied Micro Circuits Corporation (APM)
diff --git a/Documentation/prctl/no_new_privs.txt b/Documentation/prctl/no_new_privs.txt
new file mode 100644 (file)
index 0000000..cb705ec
--- /dev/null
@@ -0,0 +1,50 @@
+The execve system call can grant a newly-started program privileges that
+its parent did not have.  The most obvious examples are setuid/setgid
+programs and file capabilities.  To prevent the parent program from
+gaining these privileges as well, the kernel and user code must be
+careful to prevent the parent from doing anything that could subvert the
+child.  For example:
+
+ - The dynamic loader handles LD_* environment variables differently if
+   a program is setuid.
+
+ - chroot is disallowed to unprivileged processes, since it would allow
+   /etc/passwd to be replaced from the point of view of a process that
+   inherited chroot.
+
+ - The exec code has special handling for ptrace.
+
+These are all ad-hoc fixes.  The no_new_privs bit (since Linux 3.5) is a
+new, generic mechanism to make it safe for a process to modify its
+execution environment in a manner that persists across execve.  Any task
+can set no_new_privs.  Once the bit is set, it is inherited across fork,
+clone, and execve and cannot be unset.  With no_new_privs set, execve
+promises not to grant the privilege to do anything that could not have
+been done without the execve call.  For example, the setuid and setgid
+bits will no longer change the uid or gid; file capabilities will not
+add to the permitted set, and LSMs will not relax constraints after
+execve.
+
+Note that no_new_privs does not prevent privilege changes that do not
+involve execve.  An appropriately privileged task can still call
+setuid(2) and receive SCM_RIGHTS datagrams.
+
+There are two main use cases for no_new_privs so far:
+
+ - Filters installed for the seccomp mode 2 sandbox persist across
+   execve and can change the behavior of newly-executed programs.
+   Unprivileged users are therefore only allowed to install such filters
+   if no_new_privs is set.
+
+ - By itself, no_new_privs can be used to reduce the attack surface
+   available to an unprivileged user.  If everything running with a
+   given uid has no_new_privs set, then that uid will be unable to
+   escalate its privileges by directly attacking setuid, setgid, and
+   fcap-using binaries; it will need to compromise something without the
+   no_new_privs bit set first.
+
+In the future, other potentially dangerous kernel features could become
+available to unprivileged tasks if no_new_privs is set.  In principle,
+several options to unshare(2) and clone(2) would be safe when
+no_new_privs is set, and no_new_privs + chroot is considerably less
+dangerous than chroot by itself.
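
To make the semantics above concrete: a minimal, hypothetical user-space
sketch (not part of this commit; the /bin/sh target and PR_SET_NO_NEW_PRIVS
fallback define are illustrative assumptions) that sets the bit before
exec'ing might look like this.

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_NO_NEW_PRIVS
	#define PR_SET_NO_NEW_PRIVS 38	/* may be missing from older libc headers */
	#endif

	int main(void)
	{
		/* Once set, the bit is inherited across fork, clone, and
		 * execve, and can never be cleared again. */
		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
			perror("prctl(PR_SET_NO_NEW_PRIVS)");
			return EXIT_FAILURE;
		}

		/* A setuid/setgid bit on the exec'd binary can no longer
		 * change our uid/gid, and fcaps will not grow the permitted
		 * set. */
		execl("/bin/sh", "sh", "-c", "id", (char *)NULL);
		perror("execl");
		return EXIT_FAILURE;
	}
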
index 930126698a0f5b0f6e2c458b53604d581b201063..2c9948379469c1dba5f8c7ef711567067727ff08 100644 (file)
@@ -1930,6 +1930,23 @@ The "pte_enc" field provides a value that can OR'ed into the hash
 PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
 into the hash PTE second double word).
 
+4.75 KVM_IRQFD
+
+Capability: KVM_CAP_IRQFD
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_irqfd (in)
+Returns: 0 on success, -1 on error
+
+Allows setting an eventfd to directly trigger a guest interrupt.
+kvm_irqfd.fd specifies the file descriptor to use as the eventfd and
+kvm_irqfd.gsi specifies the irqchip pin toggled by this event.  When
+an event is triggered on the eventfd, an interrupt is injected into
+the guest using the specified gsi pin.  The irqfd is removed using
+the KVM_IRQFD_FLAG_DEASSIGN flag, specifying both kvm_irqfd.fd
+and kvm_irqfd.gsi.
+
+
 5. The kvm_run structure
 ------------------------
 
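A hedged illustration of the KVM_IRQFD ioctl documented above (assumptions:
a VM fd already obtained via KVM_CREATE_VM with an in-kernel irqchip set up,
and gsi 10 as an arbitrary example pin; this sketch is not part of the
commit):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Returns the eventfd wired to the guest, or -1 on error. */
	static int wire_irqfd(int vm_fd)
	{
		struct kvm_irqfd irqfd;
		int efd = eventfd(0, 0);

		if (efd < 0)
			return -1;

		memset(&irqfd, 0, sizeof(irqfd));
		irqfd.fd  = efd;	/* eventfd that fires the interrupt */
		irqfd.gsi = 10;		/* irqchip pin toggled by the event */

		if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0) {
			perror("KVM_IRQFD");
			close(efd);
			return -1;
		}

		/* A write to efd now injects an interrupt on gsi 10.  To tear
		 * down, repeat the ioctl with the same fd and gsi plus
		 * irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN. */
		return efd;
	}
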
index eb22272b2116c70cb6a6af438c24ab9f2edad9fb..03df1d15ebf3d9de18052bdf797342b261922672 100644 (file)
@@ -4654,8 +4654,8 @@ L:        netfilter@vger.kernel.org
 L:     coreteam@netfilter.org
 W:     http://www.netfilter.org/
 W:     http://www.iptables.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
+T:     git git://1984.lsi.us.es/nf
+T:     git git://1984.lsi.us.es/nf-next
 S:     Supported
 F:     include/linux/netfilter*
 F:     include/linux/netfilter/
index 43a31fb06318dc0a1213827a5069b73cac19f5a6..36ff15bbfdd4961afecfef9ea292c6445e9b3836 100644 (file)
@@ -183,7 +183,9 @@ SECTIONS
        }
 #endif
 
+#ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
        __data_loc = ALIGN(4);          /* location in binary */
index e5dad60b558b468315294b2c8b95c70193b6f74d..cf4528d5177448fb79cb5dc6689c873c2dbddb4a 100644 (file)
@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
        }
 }
 
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h).  However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings.  This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+       struct vm_struct *vm;
+
+       vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+       vm->addr = (void *)addr;
+       vm->size = SECTION_SIZE;
+       vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+       vm->caller = pmd_empty_section_gap;
+       vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+       struct vm_struct *vm;
+       unsigned long addr, next = 0;
+       pmd_t *pmd;
+
+       /* we're still single threaded hence no lock needed here */
+       for (vm = vmlist; vm; vm = vm->next) {
+               if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+                       continue;
+               addr = (unsigned long)vm->addr;
+               if (addr < next)
+                       continue;
+
+               /*
+                * Check if this vm starts on an odd section boundary.
+                * If so and the first section entry for this PMD is free
+                * then we block the corresponding virtual address.
+                */
+               if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+                       pmd = pmd_off_k(addr);
+                       if (pmd_none(*pmd))
+                               pmd_empty_section_gap(addr & PMD_MASK);
+               }
+
+               /*
+                * Then check if this vm ends on an odd section boundary.
+                * If so and the second section entry for this PMD is empty
+                * then we block the corresponding virtual address.
+                */
+               addr += vm->size;
+               if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+                       pmd = pmd_off_k(addr) + 1;
+                       if (pmd_none(*pmd))
+                               pmd_empty_section_gap(addr);
+               }
+
+               /* no need to look at any vm entry until we hit the next PMD */
+               next = (addr + PMD_SIZE - 1) & PMD_MASK;
+       }
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
        (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
         */
        if (mdesc->map_io)
                mdesc->map_io();
+       fill_pmd_gaps();
 
        /*
         * Finally flush the caches and tlb to ensure that we're in a
index a84aafce2a129e311a0943be0975db3823b88402..a1044f43becd380cdc7216e082fbf267b97a3fae 100644 (file)
@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        lwz     r3,VCORE_NAPPING_THREADS(r5)
        lwz     r4,VCPU_PTID(r9)
        li      r0,1
-       sldi    r0,r0,r4
+       sld     r0,r0,r4
        andc.   r3,r3,r0                /* no sense IPI'ing ourselves */
        beq     43f
        mulli   r4,r4,PACA_SIZE         /* get paca for thread 0 */
index 0f3ab06d222260c63d5866e6d6b7ab0ac643e761..eab3492a45c5c5244eca42d58354cf6d8a092836 100644 (file)
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
                /* print cpus waiting or in xmon */
                printf("cpus stopped:");
                count = 0;
-               for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+               for_each_possible_cpu(cpu) {
                        if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
                                if (count == 0)
                                        printf(" %x", cpu);
index be3cea4407ffad63824c068332079baf16336012..57e168e27b5b865e187d5ee3d34413189a1c5905 100644 (file)
@@ -3934,6 +3934,9 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
 {
        struct kvm_mmu_page *page;
 
+       if (list_empty(&kvm->arch.active_mmu_pages))
+               return;
+
        page = container_of(kvm->arch.active_mmu_pages.prev,
                            struct kvm_mmu_page, link);
        kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
index 02cf6335e9bdc5bb940ec89fcdbfe63746f9b5df..e7dee617358e810aec57ff25162ff95604fabb3c 100644 (file)
@@ -125,12 +125,8 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 
                blkg->pd[i] = pd;
                pd->blkg = blkg;
-       }
-
-       /* invoke per-policy init */
-       for (i = 0; i < BLKCG_MAX_POLS; i++) {
-               struct blkcg_policy *pol = blkcg_policy[i];
 
+               /* invoke per-policy init */
                if (blkcg_policy_enabled(blkg->q, pol))
                        pol->pd_init_fn(blkg);
        }
@@ -245,10 +241,9 @@ EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
-       struct request_queue *q = blkg->q;
        struct blkcg *blkcg = blkg->blkcg;
 
-       lockdep_assert_held(q->queue_lock);
+       lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);
 
        /* Something wrong if we are trying to remove same group twice */
index 3c923a7aeb56f1658142b091868c3c29ebffd3c5..93eb3e4f88ce78affc79c5ca6d8b7c7b0759be5a 100644 (file)
@@ -361,9 +361,10 @@ EXPORT_SYMBOL(blk_put_queue);
  */
 void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
+       int i;
+
        while (true) {
                bool drain = false;
-               int i;
 
                spin_lock_irq(q->queue_lock);
 
@@ -408,6 +409,18 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                        break;
                msleep(10);
        }
+
+       /*
+        * With queue marked dead, any woken up waiter will fail the
+        * allocation path, so the wakeup chaining is lost and we're
+        * left with hung waiters. We need to wake up those waiters.
+        */
+       if (q->request_fn) {
+               spin_lock_irq(q->queue_lock);
+               for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++)
+                       wake_up_all(&q->rq.wait[i]);
+               spin_unlock_irq(q->queue_lock);
+       }
 }
 
 /**
@@ -467,7 +480,6 @@ void blk_cleanup_queue(struct request_queue *q)
        /* mark @q DEAD, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-
        spin_lock_irq(lock);
 
        /*
@@ -485,10 +497,6 @@ void blk_cleanup_queue(struct request_queue *q)
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
-
-       if (q->queue_lock != &q->__queue_lock)
-               q->queue_lock = &q->__queue_lock;
-
        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);
 
@@ -499,6 +507,11 @@ void blk_cleanup_queue(struct request_queue *q)
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
        blk_sync_queue(q);
 
+       spin_lock_irq(lock);
+       if (q->queue_lock != &q->__queue_lock)
+               q->queue_lock = &q->__queue_lock;
+       spin_unlock_irq(lock);
+
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
 }
index 780354888958cd7ea03aef2c1654b325bc9fb24e..6e4744cbfb56b4ca0d99062a0d9b0437c894016d 100644 (file)
@@ -197,44 +197,3 @@ void blk_add_timer(struct request *req)
                mod_timer(&q->timeout, expiry);
 }
 
-/**
- * blk_abort_queue -- Abort all request on given queue
- * @queue:     pointer to queue
- *
- */
-void blk_abort_queue(struct request_queue *q)
-{
-       unsigned long flags;
-       struct request *rq, *tmp;
-       LIST_HEAD(list);
-
-       /*
-        * Not a request based block device, nothing to abort
-        */
-       if (!q->request_fn)
-               return;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-
-       elv_abort_queue(q);
-
-       /*
-        * Splice entries to local list, to avoid deadlocking if entries
-        * get readded to the timeout list by error handling
-        */
-       list_splice_init(&q->timeout_list, &list);
-
-       list_for_each_entry_safe(rq, tmp, &list, timeout_list)
-               blk_abort_request(rq);
-
-       /*
-        * Occasionally, blk_abort_request() will return without
-        * deleting the element from the list. Make sure we add those back
-        * instead of leaving them on the local stack list.
-        */
-       list_splice(&list, &q->timeout_list);
-
-       spin_unlock_irqrestore(q->queue_lock, flags);
-
-}
-EXPORT_SYMBOL_GPL(blk_abort_queue);
index 673c977cc2bfa238e0fe0efa6dddac5193fbccd8..fb52df9744f5fe411908162fbb02257ca0bc097a 100644 (file)
@@ -17,8 +17,6 @@
 #include "blk.h"
 #include "blk-cgroup.h"
 
-static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
-
 /*
  * tunables
  */
@@ -418,11 +416,6 @@ static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
        return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 }
 
-static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
-{
-       return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
-}
-
 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 {
        return pd_to_blkg(&cfqg->pd);
@@ -572,6 +565,13 @@ static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 
+static struct blkcg_policy blkcg_policy_cfq;
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+       return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
 static inline void cfqg_get(struct cfq_group *cfqg)
 {
        return blkg_get(cfqg_to_blkg(cfqg));
@@ -3951,10 +3951,11 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
        cfq_shutdown_timer_wq(cfqd);
 
-#ifndef CONFIG_CFQ_GROUP_IOSCHED
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       blkcg_deactivate_policy(q, &blkcg_policy_cfq);
+#else
        kfree(cfqd->root_group);
 #endif
-       blkcg_deactivate_policy(q, &blkcg_policy_cfq);
        kfree(cfqd);
 }
 
@@ -4194,14 +4195,15 @@ static int __init cfq_init(void)
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
        if (!cfq_group_idle)
                cfq_group_idle = 1;
-#else
-               cfq_group_idle = 0;
-#endif
 
        ret = blkcg_policy_register(&blkcg_policy_cfq);
        if (ret)
                return ret;
+#else
+       cfq_group_idle = 0;
+#endif
 
+       ret = -ENOMEM;
        cfq_pool = KMEM_CACHE(cfq_queue, 0);
        if (!cfq_pool)
                goto err_pol_unreg;
@@ -4215,13 +4217,17 @@ static int __init cfq_init(void)
 err_free_pool:
        kmem_cache_destroy(cfq_pool);
 err_pol_unreg:
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
        blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
        return ret;
 }
 
 static void __exit cfq_exit(void)
 {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
        blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
        elv_unregister(&iosched_cfq);
        kmem_cache_destroy(cfq_pool);
 }
index 260fa80ef5750f80f4ebc1415d06abc992e18b8a..9a87daa6f4fbd10202ea9d47ae1a549c806a6fb4 100644 (file)
@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
                break;
        }
 
+       if (capable(CAP_SYS_RAWIO))
+               return 0;
+
        /* In particular, rule out all resets and host-specific ioctls.  */
        printk_ratelimited(KERN_WARNING
                           "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
 
-       return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
+       return -ENOIOCTLCMD;
 }
 EXPORT_SYMBOL(scsi_verify_blk_ioctl);
 
index b5c5ff53cb57f74e89cc59bad8ca6e29df5e1db5..fcb956bb4b4c525875f961e7c2398b20c35723f4 100644 (file)
@@ -1475,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
                first_word = 0;
                spin_lock_irq(&b->bm_lock);
        }
-
        /* last page (respectively only page, for first page == last page) */
        last_word = MLPP(el >> LN2_BPL);
-       bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+
+       /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
+        * ==> e = 32767, el = 32768, last_page = 2,
+        * and now last_word = 0.
+        * We do not want to touch last_page in this case,
+        * as we did not allocate it, it is not present in bitmap->bm_pages.
+        */
+       if (last_word)
+               bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
 
        /* possibly trailing bits.
         * example: (e & 63) == 63, el will be e+1.
index 9c5c84946b056792fa45d99051bd5c28750db7e1..8e93a6ac9bb65e3497f53c92723a67429ffdf946 100644 (file)
@@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;
 
-               D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+               if (req->rq_state & RQ_LOCAL_ABORTED) {
+                       _req_may_be_done(req, m);
+                       break;
+               }
 
                __drbd_chk_io_error(mdev, false);
 
        goto_queue_for_net_read:
 
+               D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+
                /* no point in retrying if there is no good remote data,
                 * or we have no connection. */
                if (mdev->state.pdsk != D_UP_TO_DATE) {
@@ -765,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
        return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
 }
 
+static void maybe_pull_ahead(struct drbd_conf *mdev)
+{
+       int congested = 0;
+
+       /* If I don't even have good local storage, we can not reasonably try
+        * to pull ahead of the peer. We also need the local reference to make
+        * sure mdev->act_log is there.
+        * Note: caller has to make sure that net_conf is there.
+        */
+       if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+               return;
+
+       if (mdev->net_conf->cong_fill &&
+           atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+               dev_info(DEV, "Congestion-fill threshold reached\n");
+               congested = 1;
+       }
+
+       if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+               dev_info(DEV, "Congestion-extents threshold reached\n");
+               congested = 1;
+       }
+
+       if (congested) {
+               queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+               if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+                       _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+               else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+                       _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+       }
+       put_ldev(mdev);
+}
+
 static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
 {
        const int rw = bio_rw(bio);
@@ -972,29 +1011,8 @@ allocate_barrier:
                _req_mod(req, queue_for_send_oos);
 
        if (remote &&
-           mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
-               int congested = 0;
-
-               if (mdev->net_conf->cong_fill &&
-                   atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
-                       dev_info(DEV, "Congestion-fill threshold reached\n");
-                       congested = 1;
-               }
-
-               if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
-                       dev_info(DEV, "Congestion-extents threshold reached\n");
-                       congested = 1;
-               }
-
-               if (congested) {
-                       queue_barrier(mdev); /* last barrier, after mirrored writes */
-
-                       if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
-                               _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
-                       else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
-                               _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
-               }
-       }
+           mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
+               maybe_pull_ahead(mdev);
 
        spin_unlock_irq(&mdev->req_lock);
        kfree(b); /* if someone else has beaten us to it... */
index cce7df367b793d111ef875bd5cd30a6f5e08782c..553f43a90953cab40f421658ecb37fa9c20c8c54 100644 (file)
@@ -671,6 +671,7 @@ static void __reschedule_timeout(int drive, const char *message)
 
        if (drive == current_reqD)
                drive = current_drive;
+       __cancel_delayed_work(&fd_timeout);
 
        if (drive < 0 || drive >= N_DRIVE) {
                delay = 20UL * HZ;
index 264bc77dcb911c7030c787d3ad87cd79b654ce81..a8fddeb3d638ebcdf9a0191beda816d8f1338ccd 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/kthread.h>
 #include <../drivers/ata/ahci.h>
 #include <linux/export.h>
+#include <linux/debugfs.h>
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ         (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -85,6 +86,7 @@ static int instance;
  * allocated in mtip_init().
  */
 static int mtip_major;
+static struct dentry *dfs_parent;
 
 static DEFINE_SPINLOCK(rssd_index_lock);
 static DEFINE_IDA(rssd_index_ida);
@@ -2546,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
 }
 
 /*
- * Sysfs register/status dump.
+ * Sysfs status dump.
  *
  * @dev  Pointer to the device structure, passed by the kernel.
  * @attr Pointer to the device_attribute structure passed by the kernel.
@@ -2555,45 +2557,68 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
  * return value
  *     The size, in bytes, of the data copied into buf.
  */
-static ssize_t mtip_hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_status(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
 {
-       u32 group_allocated;
        struct driver_data *dd = dev_to_disk(dev)->private_data;
        int size = 0;
+
+       if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+               size += sprintf(buf, "%s", "thermal_shutdown\n");
+       else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+               size += sprintf(buf, "%s", "write_protect\n");
+       else
+               size += sprintf(buf, "%s", "online\n");
+
+       return size;
+}
+
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+
+static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+                                 size_t len, loff_t *offset)
+{
+       struct driver_data *dd =  (struct driver_data *)f->private_data;
+       char buf[MTIP_DFS_MAX_BUF_SIZE];
+       u32 group_allocated;
+       int size = *offset;
        int n;
 
-       size += sprintf(&buf[size], "Hardware\n--------\n");
-       size += sprintf(&buf[size], "S ACTive      : [ 0x");
+       if (!len || size)
+               return 0;
+
+       if (size < 0)
+               return -EINVAL;
+
+       size += sprintf(&buf[size], "H/ S ACTive      : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                         readl(dd->port->s_active[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "Command Issue : [ 0x");
+       size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                        readl(dd->port->cmd_issue[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "Completed     : [ 0x");
+       size += sprintf(&buf[size], "H/ Completed     : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                readl(dd->port->completed[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
+       size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
                                readl(dd->port->mmio + PORT_IRQ_STAT));
-       size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
+       size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
                                readl(dd->mmio + HOST_IRQ_STAT));
        size += sprintf(&buf[size], "\n");
 
-       size += sprintf(&buf[size], "Local\n-----\n");
-       size += sprintf(&buf[size], "Allocated    : [ 0x");
+       size += sprintf(&buf[size], "L/ Allocated     : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--) {
                if (sizeof(long) > sizeof(u32))
@@ -2605,7 +2630,7 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
        }
        size += sprintf(&buf[size], "]\n");
 
-       size += sprintf(&buf[size], "Commands in Q: [ 0x");
+       size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--) {
                if (sizeof(long) > sizeof(u32))
@@ -2617,44 +2642,53 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
        }
        size += sprintf(&buf[size], "]\n");
 
-       return size;
+       *offset = size <= len ? size : len;
+       size = copy_to_user(ubuf, buf, *offset);
+       if (size)
+               return -EFAULT;
+
+       return *offset;
 }
 
-static ssize_t mtip_hw_show_status(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
+static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+                                 size_t len, loff_t *offset)
 {
-       struct driver_data *dd = dev_to_disk(dev)->private_data;
-       int size = 0;
+       struct driver_data *dd =  (struct driver_data *)f->private_data;
+       char buf[MTIP_DFS_MAX_BUF_SIZE];
+       int size = *offset;
 
-       if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
-               size += sprintf(buf, "%s", "thermal_shutdown\n");
-       else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
-               size += sprintf(buf, "%s", "write_protect\n");
-       else
-               size += sprintf(buf, "%s", "online\n");
-
-       return size;
-}
+       if (!len || size)
+               return 0;
 
-static ssize_t mtip_hw_show_flags(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
-{
-       struct driver_data *dd = dev_to_disk(dev)->private_data;
-       int size = 0;
+       if (size < 0)
+               return -EINVAL;
 
-       size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
+       size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
                                                        dd->port->flags);
-       size += sprintf(&buf[size], "Flag in dd struct   : [ %08lX ]\n",
+       size += sprintf(&buf[size], "Flag-dd   : [ %08lX ]\n",
                                                        dd->dd_flag);
 
-       return size;
+       *offset = size <= len ? size : len;
+       size = copy_to_user(ubuf, buf, *offset);
+       if (size)
+               return -EFAULT;
+
+       return *offset;
 }
 
-static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
-static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
-static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
+static const struct file_operations mtip_regs_fops = {
+       .owner  = THIS_MODULE,
+       .open   = simple_open,
+       .read   = mtip_hw_read_registers,
+       .llseek = no_llseek,
+};
+
+static const struct file_operations mtip_flags_fops = {
+       .owner  = THIS_MODULE,
+       .open   = simple_open,
+       .read   = mtip_hw_read_flags,
+       .llseek = no_llseek,
+};
 
 /*
  * Create the sysfs related attributes.
@@ -2671,15 +2705,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
        if (!kobj || !dd)
                return -EINVAL;
 
-       if (sysfs_create_file(kobj, &dev_attr_registers.attr))
-               dev_warn(&dd->pdev->dev,
-                       "Error creating 'registers' sysfs entry\n");
        if (sysfs_create_file(kobj, &dev_attr_status.attr))
                dev_warn(&dd->pdev->dev,
                        "Error creating 'status' sysfs entry\n");
-       if (sysfs_create_file(kobj, &dev_attr_flags.attr))
-               dev_warn(&dd->pdev->dev,
-                       "Error creating 'flags' sysfs entry\n");
        return 0;
 }
 
@@ -2698,13 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
        if (!kobj || !dd)
                return -EINVAL;
 
-       sysfs_remove_file(kobj, &dev_attr_registers.attr);
        sysfs_remove_file(kobj, &dev_attr_status.attr);
-       sysfs_remove_file(kobj, &dev_attr_flags.attr);
 
        return 0;
 }
 
+static int mtip_hw_debugfs_init(struct driver_data *dd)
+{
+       if (!dfs_parent)
+               return -1;
+
+       dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
+       if (IS_ERR_OR_NULL(dd->dfs_node)) {
+               dev_warn(&dd->pdev->dev,
+                       "Error creating node %s under debugfs\n",
+                                               dd->disk->disk_name);
+               dd->dfs_node = NULL;
+               return -1;
+       }
+
+       debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
+                                                       &mtip_flags_fops);
+       debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
+                                                       &mtip_regs_fops);
+
+       return 0;
+}
+
+static void mtip_hw_debugfs_exit(struct driver_data *dd)
+{
+       debugfs_remove_recursive(dd->dfs_node);
+}
+
+
 /*
  * Perform any init/resume time hardware setup
  *
@@ -3730,6 +3784,7 @@ skip_create_disk:
                mtip_hw_sysfs_init(dd, kobj);
                kobject_put(kobj);
        }
+       mtip_hw_debugfs_init(dd);
 
        if (dd->mtip_svc_handler) {
                set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3755,6 +3810,8 @@ start_service_thread:
        return rv;
 
 kthread_run_error:
+       mtip_hw_debugfs_exit(dd);
+
        /* Delete our gendisk. This also removes the device from /dev */
        del_gendisk(dd->disk);
 
@@ -3805,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
                        kobject_put(kobj);
                }
        }
+       mtip_hw_debugfs_exit(dd);
 
        /*
         * Delete our gendisk structure. This also removes the device
@@ -4152,10 +4210,20 @@ static int __init mtip_init(void)
        }
        mtip_major = error;
 
+       if (!dfs_parent) {
+               dfs_parent = debugfs_create_dir("rssd", NULL);
+               if (IS_ERR_OR_NULL(dfs_parent)) {
+                       printk(KERN_WARNING "Error creating debugfs parent\n");
+                       dfs_parent = NULL;
+               }
+       }
+
        /* Register our PCI operations. */
        error = pci_register_driver(&mtip_pci_driver);
-       if (error)
+       if (error) {
+               debugfs_remove(dfs_parent);
                unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+       }
 
        return error;
 }
@@ -4172,6 +4240,8 @@ static int __init mtip_init(void)
  */
 static void __exit mtip_exit(void)
 {
+       debugfs_remove_recursive(dfs_parent);
+
        /* Release the allocated major block device number. */
        unregister_blkdev(mtip_major, MTIP_DRV_NAME);
 
index b2c88da26b2a7b7f94ff77b6f1a1d2047f6b45f3..f51fc23d17bb0e0025c74ac9f495007dcbc8b784 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/ata.h>
 #include <linux/interrupt.h>
 #include <linux/genhd.h>
-#include <linux/version.h>
 
 /* Offset of Subsystem Device ID in pci configuration space */
 #define PCI_SUBSYSTEM_DEVICEID 0x2E
  #define dbg_printk(format, arg...)
 #endif
 
+#define MTIP_DFS_MAX_BUF_SIZE 1024
+
 #define __force_bit2int (unsigned int __force)
 
 enum {
@@ -447,6 +448,8 @@ struct driver_data {
        unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
 
        struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+
+       struct dentry *dfs_node;
 };
 
 #endif
index aa2712060bfbcd153e446f5adcf80a9d61377e75..9a72277a31df0cb131f879bb8a1503a65a57b7cc 100644 (file)
@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
        }
 }
 
+struct mm_plug_cb {
+       struct blk_plug_cb cb;
+       struct cardinfo *card;
+};
+
+static void mm_unplug(struct blk_plug_cb *cb)
+{
+       struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+
+       spin_lock_irq(&mmcb->card->lock);
+       activate(mmcb->card);
+       spin_unlock_irq(&mmcb->card->lock);
+       kfree(mmcb);
+}
+
+static int mm_check_plugged(struct cardinfo *card)
+{
+       struct blk_plug *plug = current->plug;
+       struct mm_plug_cb *mmcb;
+
+       if (!plug)
+               return 0;
+
+       list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
+               if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
+                       return 1;
+       }
+       /* Not currently on the callback list */
+       mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
+       if (!mmcb)
+               return 0;
+
+       mmcb->card = card;
+       mmcb->cb.callback = mm_unplug;
+       list_add(&mmcb->cb.list, &plug->cb_list);
+       return 1;
+}
+
 static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
        *card->biotail = bio;
        bio->bi_next = NULL;
        card->biotail = &bio->bi_next;
+       if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+               activate(card);
        spin_unlock_irq(&card->lock);
 
        return;
index 773cf27dc23fc595d6fca48ea8ec6c4992438d18..9ad3b5ec1dc1c521085db47a7928cc8cf1179701 100644 (file)
@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
index 60eed4bdd2e4528ae3c3b8871cd65f85d3c34952..e4fb3374dcd2aaa6d0f834cd9394564a3c022a38 100644 (file)
@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
        return free;
 }
 
-static void add_id_to_freelist(struct blkfront_info *info,
+static int add_id_to_freelist(struct blkfront_info *info,
                               unsigned long id)
 {
+       if (info->shadow[id].req.u.rw.id != id)
+               return -EINVAL;
+       if (info->shadow[id].request == NULL)
+               return -EINVAL;
        info->shadow[id].req.u.rw.id  = info->shadow_free;
        info->shadow[id].request = NULL;
        info->shadow_free = id;
+       return 0;
 }
 
+static const char *op_name(int op)
+{
+       static const char *const names[] = {
+               [BLKIF_OP_READ] = "read",
+               [BLKIF_OP_WRITE] = "write",
+               [BLKIF_OP_WRITE_BARRIER] = "barrier",
+               [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
+               [BLKIF_OP_DISCARD] = "discard" };
+
+       if (op < 0 || op >= ARRAY_SIZE(names))
+               return "unknown";
+
+       if (!names[op])
+               return "reserved";
+
+       return names[op];
+}
 static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
 {
        unsigned int end = minor + nr;
@@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
                bret = RING_GET_RESPONSE(&info->ring, i);
                id   = bret->id;
+               /*
+                * The backend has messed up and given us an id that we would
+                * never have given to it (we stamp it up to BLK_RING_SIZE -
+                * look in get_id_from_freelist).
+                */
+               if (id >= BLK_RING_SIZE) {
+                       WARN(1, "%s: response to %s has incorrect id (%ld)\n",
+                            info->gd->disk_name, op_name(bret->operation), id);
+                       /* We can't safely get the 'struct request' as
+                        * the id is busted. */
+                       continue;
+               }
                req  = info->shadow[id].request;
 
                if (bret->operation != BLKIF_OP_DISCARD)
                        blkif_completion(&info->shadow[id]);
 
-               add_id_to_freelist(info, id);
+               if (add_id_to_freelist(info, id)) {
+                       WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
+                            info->gd->disk_name, op_name(bret->operation), id);
+                       continue;
+               }
 
                error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
                switch (bret->operation) {
                case BLKIF_OP_DISCARD:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
-                               printk(KERN_WARNING "blkfront: %s: discard op failed\n",
-                                          info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+                                          info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
@@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
-                               printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
-                                      info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-                                      "barrier" :  "flush disk cache",
-                                      info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+                                      info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
                                     info->shadow[id].req.u.rw.nr_segments == 0)) {
-                               printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
-                                      info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-                                      "barrier" :  "flush disk cache",
-                                      info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
+                                      info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(error)) {
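
The two blkfront hunks above defend against a misbehaving backend: a response id is trusted only after it is range-checked against BLK_RING_SIZE and confirmed to refer to a request that is actually in flight. A minimal userspace sketch of the same validate-before-index pattern (all names here are illustrative, not taken from the driver):

#include <stdio.h>

#define RING_SIZE 32

struct shadow_entry { void *request; };
static struct shadow_entry shadow[RING_SIZE];

static void *lookup_request(unsigned long id)
{
        if (id >= RING_SIZE) {                  /* range check first */
                fprintf(stderr, "response has incorrect id (%lu)\n", id);
                return NULL;
        }
        if (!shadow[id].request) {              /* then in-flight check */
                fprintf(stderr, "id %lu was never handed out\n", id);
                return NULL;
        }
        return shadow[id].request;
}

int main(void)
{
        int dummy;

        shadow[3].request = &dummy;             /* one request in flight */
        printf("%p\n", lookup_request(3));      /* valid */
        printf("%p\n", lookup_request(999));    /* rejected */
        return 0;
}
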
index dcbe05616090de6d5a830cc8a5950fb282820eee..9a1eb0cfa95f3f0a00a7334f3b276bf623d88cf6 100644 (file)
@@ -1067,26 +1067,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
 
        old_parent = clk->parent;
 
-       /* find index of new parent clock using cached parent ptrs */
-       if (clk->parents)
-               for (i = 0; i < clk->num_parents; i++)
-                       if (clk->parents[i] == parent)
-                               break;
-       else
+       if (!clk->parents)
                clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
                                                                GFP_KERNEL);
 
        /*
-        * find index of new parent clock using string name comparison
-        * also try to cache the parent to avoid future calls to __clk_lookup
+        * find index of new parent clock using cached parent ptrs,
+        * or if not yet cached, use string name comparison and cache
+        * them now to avoid future calls to __clk_lookup.
         */
-       if (i == clk->num_parents)
-               for (i = 0; i < clk->num_parents; i++)
-                       if (!strcmp(clk->parent_names[i], parent->name)) {
-                               if (clk->parents)
-                                       clk->parents[i] = __clk_lookup(parent->name);
-                               break;
-                       }
+       for (i = 0; i < clk->num_parents; i++) {
+               if (clk->parents && clk->parents[i] == parent)
+                       break;
+               else if (!strcmp(clk->parent_names[i], parent->name)) {
+                       if (clk->parents)
+                               clk->parents[i] = __clk_lookup(parent->name);
+                       break;
+               }
+       }
 
        if (i == clk->num_parents) {
                pr_debug("%s: clock %s is not a possible parent of clock %s\n",
index 5873e481e5d2dadde3c16e0e0883d8e6e3340dbe..a8743c399e83234c976ebdb4b471542a0645c42d 100644 (file)
@@ -1039,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
        return true;
 }
 
+static bool valid_inferred_mode(const struct drm_connector *connector,
+                               const struct drm_display_mode *mode)
+{
+       struct drm_display_mode *m;
+       bool ok = false;
+
+       list_for_each_entry(m, &connector->probed_modes, head) {
+               if (mode->hdisplay == m->hdisplay &&
+                   mode->vdisplay == m->vdisplay &&
+                   drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+                       return false; /* duplicated */
+               if (mode->hdisplay <= m->hdisplay &&
+                   mode->vdisplay <= m->vdisplay)
+                       ok = true;
+       }
+       return ok;
+}
+
 static int
 drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        struct detailed_timing *timing)
@@ -1048,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
        struct drm_device *dev = connector->dev;
 
        for (i = 0; i < drm_num_dmt_modes; i++) {
-               if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+               if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+                   valid_inferred_mode(connector, drm_dmt_modes + i)) {
                        newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
                        if (newmode) {
                                drm_mode_probed_add(connector, newmode);
@@ -1088,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        return modes;
 
                fixup_mode_1366x768(newmode);
-               if (!mode_in_range(newmode, edid, timing)) {
+               if (!mode_in_range(newmode, edid, timing) ||
+                   !valid_inferred_mode(connector, newmode)) {
                        drm_mode_destroy(dev, newmode);
                        continue;
                }
@@ -1116,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        return modes;
 
                fixup_mode_1366x768(newmode);
-               if (!mode_in_range(newmode, edid, timing)) {
+               if (!mode_in_range(newmode, edid, timing) ||
+                   !valid_inferred_mode(connector, newmode)) {
                        drm_mode_destroy(dev, newmode);
                        continue;
                }
index f94792626b94fadae47303f150e7aff278d371ef..36822b924eb12974f7c73af8fe8368707ab08abf 100644 (file)
@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
        }
 }
 
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+       struct apertures_struct *ap;
+       struct pci_dev *pdev = dev_priv->dev->pdev;
+       bool primary;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return;
+
+       ap->ranges[0].base = dev_priv->dev->agp->base;
+       ap->ranges[0].size =
+               dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+       primary =
+               pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+       remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+       kfree(ap);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto free_priv;
        }
 
+       dev_priv->mm.gtt = intel_gtt_get();
+       if (!dev_priv->mm.gtt) {
+               DRM_ERROR("Failed to initialize GTT\n");
+               ret = -ENODEV;
+               goto put_bridge;
+       }
+
+       i915_kick_out_firmware_fb(dev_priv);
+
        pci_set_master(dev->pdev);
 
        /* overlay on gen2 is broken and can't address above 1G */
@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto put_bridge;
        }
 
-       dev_priv->mm.gtt = intel_gtt_get();
-       if (!dev_priv->mm.gtt) {
-               DRM_ERROR("Failed to initialize GTT\n");
-               ret = -ENODEV;
-               goto out_rmmap;
-       }
-
        aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
        dev_priv->mm.gtt_mapping =
index 59d44937dd9fcc9f1c4350bc590950d5be75175f..84b648a7ddd8cf697fc07fd00a1e2eed39ff46bd 100644 (file)
@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
        rdev->vm_manager.enabled = false;
 
        /* mark first vm as always in use, it's the system one */
+       /* allocate enough for 2 full VM pts */
        r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-                                     rdev->vm_manager.max_pfn * 8,
+                                     rdev->vm_manager.max_pfn * 8 * 2,
                                      RADEON_GEM_DOMAIN_VRAM);
        if (r) {
                dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -633,7 +634,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->list);
        INIT_LIST_HEAD(&vm->va);
-       vm->last_pfn = 0;
+       /* SI requires equal sized PTs for all VMs, so always set
+        * last_pfn to max_pfn.  cayman allows variable sized
+        * pts so we can grow them as needed.  Once we switch
+        * to two level pts we can unify this again.
+        */
+       if (rdev->family >= CHIP_TAHITI)
+               vm->last_pfn = rdev->vm_manager.max_pfn;
+       else
+               vm->last_pfn = 0;
        /* map the ib pool buffer at 0 in virtual address space, set
         * read only
         */
index f28bd4b7ef980937c88eb54c30b5534adc5abf3b..21ec9f5653cedc94e729e521e8cf62e4b14c2884 100644 (file)
@@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
 {
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                break;
        }
        drm_gem_object_unreference_unlocked(gobj);
-       r = radeon_gem_handle_lockup(robj->rdev, r);
+       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
 
 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
 {
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, NULL, false);
        /* callback hw specific functions if any */
-       if (robj->rdev->asic->ioctl_wait_idle)
-               robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+       if (rdev->asic->ioctl_wait_idle)
+               robj->rdev->asic->ioctl_wait_idle(rdev, robj);
        drm_gem_object_unreference_unlocked(gobj);
-       r = radeon_gem_handle_lockup(robj->rdev, r);
+       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
 
index c7b61f16ecfd79855315b5ad938499510a2d8b41..0b0279291a73e79109dd95db5ceee4823be1da66 100644 (file)
@@ -2365,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(0x15DC, 0);
 
        /* empty context1-15 */
-       /* FIXME start with 1G, once using 2 level pt switch to full
+       /* FIXME start with 4G, once using 2 level pt switch to full
         * vm size space
         */
        /* set vm size, must be a multiple of 4 */
        WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
+       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
index 41dc76db43118a347e4a8a06923b0fd076a12dc3..a019fbb70880bd402aea095b8b90a8ca26a0e66a 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/reboot.h>
 #include "leds.h"
 
+static int panic_heartbeats;
+
 struct heartbeat_trig_data {
        unsigned int phase;
        unsigned int period;
@@ -34,6 +36,11 @@ static void led_heartbeat_function(unsigned long data)
        unsigned long brightness = LED_OFF;
        unsigned long delay = 0;
 
+       if (unlikely(panic_heartbeats)) {
+               led_set_brightness(led_cdev, LED_OFF);
+               return;
+       }
+
        /* acts like an actual heart beat -- ie thump-thump-pause... */
        switch (heartbeat_data->phase) {
        case 0:
@@ -111,12 +118,19 @@ static int heartbeat_reboot_notifier(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
+static int heartbeat_panic_notifier(struct notifier_block *nb,
+                                    unsigned long code, void *unused)
+{
+       panic_heartbeats = 1;
+       return NOTIFY_DONE;
+}
+
 static struct notifier_block heartbeat_reboot_nb = {
        .notifier_call = heartbeat_reboot_notifier,
 };
 
 static struct notifier_block heartbeat_panic_nb = {
-       .notifier_call = heartbeat_reboot_notifier,
+       .notifier_call = heartbeat_panic_notifier,
 };
 
 static int __init heartbeat_trig_init(void)
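
The fix above gives the panic chain its own callback: panic context is atomic, so instead of reusing the reboot handler the new notifier merely sets a flag that the timer function checks. A minimal module-style sketch of hooking the panic notifier chain (boilerplate only; the example_* names are placeholders):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int panicked;

/* Panic context cannot sleep or do teardown work -- just record the
 * fact and let the regular code paths check the flag. */
static int example_panic_notifier(struct notifier_block *nb,
                                  unsigned long code, void *unused)
{
        panicked = 1;
        return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
        .notifier_call = example_panic_notifier,
};

static int __init example_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &example_panic_nb);
        return 0;
}

static void __exit example_exit(void)
{
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &example_panic_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
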
index 37fdaf81bd1f89abfd28f6a46d2be7ce95f8bcba..ce59824fb41401963113a17af05ac660954bbef2 100644 (file)
@@ -2292,6 +2292,13 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
        if (r)
                return r;
 
+       r = dm_pool_commit_metadata(pool->pmd);
+       if (r) {
+               DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+                     __func__, r);
+               return r;
+       }
+
        r = dm_pool_reserve_metadata_snap(pool->pmd);
        if (r)
                DMWARN("reserve_metadata_snap message failed.");
index 1c2f9048e1ae010864c4d2478c05f9a510c729e1..a4c219e3c859624925125e54029aac2da6211674 100644 (file)
@@ -5784,8 +5784,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
                        super_types[mddev->major_version].
                                validate_super(mddev, rdev);
                if ((info->state & (1<<MD_DISK_SYNC)) &&
-                   (!test_bit(In_sync, &rdev->flags) ||
-                    rdev->raid_disk != info->raid_disk)) {
+                    rdev->raid_disk != info->raid_disk) {
                        /* This was a hot-add request, but events doesn't
                         * match, so reject it.
                         */
@@ -6751,7 +6750,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev
        thread->tsk = kthread_run(md_thread, thread,
                                  "%s_%s",
                                  mdname(thread->mddev),
-                                 name ?: mddev->pers->name);
+                                 name);
        if (IS_ERR(thread->tsk)) {
                kfree(thread);
                return NULL;
@@ -7298,6 +7297,7 @@ void md_do_sync(struct mddev *mddev)
        int skipped = 0;
        struct md_rdev *rdev;
        char *desc;
+       struct blk_plug plug;
 
        /* just in case thread restarts... */
        if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7447,6 +7447,7 @@ void md_do_sync(struct mddev *mddev)
        }
        mddev->curr_resync_completed = j;
 
+       blk_start_plug(&plug);
        while (j < max_sectors) {
                sector_t sectors;
 
@@ -7552,6 +7553,7 @@ void md_do_sync(struct mddev *mddev)
         * this also signals 'finished resyncing' to md_stop
         */
  out:
+       blk_finish_plug(&plug);
        wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
        /* tell personality that we are finished */
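
The md_do_sync() hunks wrap the whole resync loop in a block plug, letting the block layer merge the resync I/O before it reaches the underlying queues. The pattern in isolation (a sketch; submit_one() stands in for the real per-bio work):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Batch a burst of bios behind a plug so the block layer can merge
 * them; the plug is flushed by blk_finish_plug() or when the task
 * schedules. */
static void submit_batch(struct bio **bios, int nr,
                         void (*submit_one)(struct bio *))
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                submit_one(bios[i]);
        blk_finish_plug(&plug);
}
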
index 9339e67fcc79a9f961d016d3f80291ce012323bb..61a1833ebaf33ec40afaf752db9a849439c9bd13 100644 (file)
@@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev)
        }
 
        {
-               mddev->thread = md_register_thread(multipathd, mddev, NULL);
+               mddev->thread = md_register_thread(multipathd, mddev,
+                                                  "multipath");
                if (!mddev->thread) {
                        printk(KERN_ERR "multipath: couldn't allocate thread"
                                " for %s\n", mdname(mddev));
index 50ed53bf4aa2b1efe1136c2520067ece16bb9b35..fc90c11620adb026c9842d6e95497248d02556e8 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/device-mapper.h>
 #include <linux/export.h>
+#include <linux/vmalloc.h>
 
 #ifdef CONFIG_DM_DEBUG_SPACE_MAPS
 
@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
 
        ca->nr = nr_blocks;
        ca->nr_free = nr_blocks;
-       ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
-       if (!ca->counts)
-               return -ENOMEM;
+
+       if (!nr_blocks)
+               ca->counts = NULL;
+       else {
+               ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
+               if (!ca->counts)
+                       return -ENOMEM;
+       }
 
        return 0;
 }
 
+static void ca_destroy(struct count_array *ca)
+{
+       vfree(ca->counts);
+}
+
 static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 {
        int r;
@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
 {
        dm_block_t nr_blocks = ca->nr + extra_blocks;
-       uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+       uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
        if (!counts)
                return -ENOMEM;
 
-       memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
-       kfree(ca->counts);
+       if (ca->counts) {
+               memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+               ca_destroy(ca);
+       }
        ca->nr = nr_blocks;
        ca->nr_free += extra_blocks;
        ca->counts = counts;
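
The switch from kzalloc() to vzalloc() above matters because the checker's count array grows linearly with the block count: kmalloc-family allocations need physically contiguous pages and start failing for large sizes, while vzalloc() only needs virtually contiguous address space. A sketch of the allocate/free pairing (hypothetical helpers):

#include <linux/slab.h>
#include <linux/vmalloc.h>

static u32 *alloc_counts(size_t nr_blocks)
{
        if (!nr_blocks)
                return NULL;
        return vzalloc(sizeof(u32) * nr_blocks);
}

static void free_counts(u32 *counts)
{
        vfree(counts);  /* vfree(NULL) is a no-op, like kfree(NULL) */
}
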
@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
        return 0;
 }
 
-static void ca_destroy(struct count_array *ca)
-{
-       kfree(ca->counts);
-}
-
 /*----------------------------------------------------------------*/
 
 struct sm_checker {
@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_commit(&smc->old_counts, &smc->counts);
@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        return &smc->sm;
@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
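
From here on the checker constructors report why they failed: ERR_PTR() encodes an errno in the returned pointer, and callers test it with IS_ERR()/PTR_ERR() instead of comparing against NULL (see the dm_sm_disk_create() and dm_tm_create_internal() hunks below). The idiom in isolation, as a sketch with a hypothetical struct thing:

#include <linux/err.h>
#include <linux/slab.h>

struct thing { int x; };

/* Constructor: encode the errno in the pointer on failure. */
static struct thing *thing_create(int arg)
{
        struct thing *t;

        if (arg < 0)
                return ERR_PTR(-EINVAL);

        t = kmalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
                return ERR_PTR(-ENOMEM);

        t->x = arg;
        return t;
}

/* Caller: IS_ERR()/PTR_ERR() recover the reason, which a bare NULL
 * test would lose. */
static int thing_use(int arg)
{
        struct thing *t = thing_create(arg);

        if (IS_ERR(t))
                return PTR_ERR(t);
        kfree(t);
        return 0;
}
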
index fc469ba9f6277a7c701615a1b3d788a05fa52559..3d0ed53328831627ab6ec79cb85946f36b0fe7a4 100644 (file)
@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
                                       dm_block_t nr_blocks)
 {
        struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
-       return dm_sm_checker_create_fresh(sm);
+       struct dm_space_map *smc;
+
+       if (IS_ERR_OR_NULL(sm))
+               return sm;
+
+       smc = dm_sm_checker_create_fresh(sm);
+       if (IS_ERR(smc))
+               dm_sm_destroy(sm);
+
+       return smc;
 }
 EXPORT_SYMBOL_GPL(dm_sm_disk_create);
 
index 400fe144c0cd1c94dd5c325c5ed690a425b7ece3..e5604b32d91ff0017a5a625cfd8106fca92bfe19 100644 (file)
@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
 
 void dm_tm_destroy(struct dm_transaction_manager *tm)
 {
+       if (!tm->is_clone)
+               wipe_shadow_table(tm);
+
        kfree(tm);
 }
 EXPORT_SYMBOL_GPL(dm_tm_destroy);
@@ -344,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
 
        } else {
                r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
@@ -364,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
        }
 
        return 0;
index a9c7981ddd245c6955cbed390b617a9172abde31..8c2754f835ef89a99d247719d2d114f6fba30b13 100644 (file)
@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                int bad_sectors;
 
                int disk = start_disk + i;
-               if (disk >= conf->raid_disks)
-                       disk -= conf->raid_disks;
+               if (disk >= conf->raid_disks * 2)
+                       disk -= conf->raid_disks * 2;
 
                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (r1_bio->bios[disk] == IO_BLOCKED
@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        struct md_rdev *blocked_rdev;
-       int plugged;
        int first_clone;
        int sectors_handled;
        int max_sectors;
@@ -1034,7 +1033,6 @@ read_again:
         * the bad blocks.  Each set of writes gets its own r1bio
         * with a set of bios attached.
         */
-       plugged = mddev_check_plugged(mddev);
 
        disks = conf->raid_disks * 2;
  retry_write:
@@ -1191,6 +1189,8 @@ read_again:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
        }
        /* Mustn't call r1_bio_write_done before this next test,
         * as it could result in the bio being freed.
@@ -1213,9 +1213,6 @@ read_again:
 
        /* In case raid1d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
-
-       if (do_sync || !bitmap || !plugged)
-               md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2621,7 +2618,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
                goto abort;
        }
        err = -ENOMEM;
-       conf->thread = md_register_thread(raid1d, mddev, NULL);
+       conf->thread = md_register_thread(raid1d, mddev, "raid1");
        if (!conf->thread) {
                printk(KERN_ERR
                       "md/raid1:%s: couldn't allocate thread\n",
index 99ae6068e456992ef2b16c98e01d5c2ee0362128..8da6282254c3e822a27702c469b9afce720bda43 100644 (file)
@@ -1039,7 +1039,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
-       int plugged;
        int sectors_handled;
        int max_sectors;
        int sectors;
@@ -1239,7 +1238,6 @@ read_again:
         * of r10_bios is recorded in bio->bi_phys_segments just as with
         * the read case.
         */
-       plugged = mddev_check_plugged(mddev);
 
        r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
        raid10_find_phys(conf, r10_bio);
@@ -1396,6 +1394,8 @@ retry_write:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
 
                if (!r10_bio->devs[i].repl_bio)
                        continue;
@@ -1423,6 +1423,8 @@ retry_write:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
        }
 
        /* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1448,9 +1450,6 @@ retry_write:
 
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
-
-       if (do_sync || !mddev->bitmap || !plugged)
-               md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2310,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        if (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage, WRITE)
+                                            s, conf->tmppage, WRITE)
                            == 0) {
                                /* Well, this device is dead */
                                printk(KERN_NOTICE
@@ -2349,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        switch (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage,
+                                            s, conf->tmppage,
                                                 READ)) {
                        case 0:
                                /* Well, this device is dead */
@@ -2512,7 +2511,7 @@ read_more:
        slot = r10_bio->read_slot;
        printk_ratelimited(
                KERN_ERR
-               "md/raid10:%s: %s: redirecting"
+               "md/raid10:%s: %s: redirecting "
                "sector %llu to another mirror\n",
                mdname(mddev),
                bdevname(rdev->bdev, b),
@@ -2661,7 +2660,8 @@ static void raid10d(struct mddev *mddev)
        blk_start_plug(&plug);
        for (;;) {
 
-               flush_pending_writes(conf);
+               if (atomic_read(&mddev->plug_cnt) == 0)
+                       flush_pending_writes(conf);
 
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@ -2890,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        /* want to reconstruct this device */
                        rb2 = r10_bio;
                        sect = raid10_find_virt(conf, sector_nr, i);
+                       if (sect >= mddev->resync_max_sectors) {
+                               /* last stripe is not complete - don't
+                                * try to recover this sector.
+                                */
+                               continue;
+                       }
                        /* Unless we are doing a full sync, or a replacement
                         * we only need to recover the block if it is set in
                         * the bitmap
@@ -3421,7 +3427,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
        spin_lock_init(&conf->resync_lock);
        init_waitqueue_head(&conf->wait_barrier);
 
-       conf->thread = md_register_thread(raid10d, mddev, NULL);
+       conf->thread = md_register_thread(raid10d, mddev, "raid10");
        if (!conf->thread)
                goto out;
 
index d26767246d26ad1d2bb9a2da371ab1bbbdf130fc..04348d76bb30fa8831964ea980ec2df912a45f92 100644 (file)
@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
                BUG_ON(!list_empty(&sh->lru));
                BUG_ON(atomic_read(&conf->active_stripes)==0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
-                       if (test_bit(STRIPE_DELAYED, &sh->state))
+                       if (test_bit(STRIPE_DELAYED, &sh->state) &&
+                           !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
                        else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
                                   sh->bm_seq - conf->seq_write > 0)
                                list_add_tail(&sh->lru, &conf->bitmap_list);
                        else {
+                               clear_bit(STRIPE_DELAYED, &sh->state);
                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                                list_add_tail(&sh->lru, &conf->handle_list);
                        }
@@ -606,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                         * a chance*/
                                        md_check_recovery(conf->mddev);
                                }
+                               /*
+                                * Because md_wait_for_blocked_rdev
+                                * will dec nr_pending, we must
+                                * increment it first.
+                                */
+                               atomic_inc(&rdev->nr_pending);
                                md_wait_for_blocked_rdev(rdev, conf->mddev);
                        } else {
                                /* Acknowledged bad block - skip the write */
@@ -1737,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
        } else {
                const char *bdn = bdevname(rdev->bdev, b);
                int retry = 0;
+               int set_bad = 0;
 
                clear_bit(R5_UPTODATE, &sh->dev[i].flags);
                atomic_inc(&rdev->read_errors);
@@ -1748,7 +1757,8 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (conf->mddev->degraded >= conf->max_degraded)
+               else if (conf->mddev->degraded >= conf->max_degraded) {
+                       set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error not correctable "
@@ -1756,8 +1766,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+               } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
                        /* Oh, no!!! */
+                       set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error NOT corrected!! "
@@ -1765,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (atomic_read(&rdev->read_errors)
+               else if (atomic_read(&rdev->read_errors)
                         > conf->max_nr_stripes)
                        printk(KERN_WARNING
                               "md/raid:%s: Too many read errors, failing device %s.\n",
@@ -1777,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
                else {
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
-                       md_error(conf->mddev, rdev);
+                       if (!(set_bad
+                             && test_bit(In_sync, &rdev->flags)
+                             && rdev_set_badblocks(
+                                     rdev, sh->sector, STRIPE_SECTORS, 0)))
+                               md_error(conf->mddev, rdev);
                }
        }
        rdev_dec_pending(rdev, conf->mddev);
@@ -3582,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh)
 
 finish:
        /* wait for this device to become unblocked */
-       if (conf->mddev->external && unlikely(s.blocked_rdev))
-               md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+       if (unlikely(s.blocked_rdev)) {
+               if (conf->mddev->external)
+                       md_wait_for_blocked_rdev(s.blocked_rdev,
+                                                conf->mddev);
+               else
+                       /* Internal metadata will immediately
+                        * be written by raid5d, so we don't
+                        * need to wait here.
+                        */
+                       rdev_dec_pending(s.blocked_rdev,
+                                        conf->mddev);
+       }
 
        if (s.handle_bad_blocks)
                for (i = disks; i--; ) {
@@ -3881,8 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                raid_bio->bi_next = (void*)rdev;
                align_bi->bi_bdev =  rdev->bdev;
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
-               /* No reshape active, so we can trust rdev->data_offset */
-               align_bi->bi_sector += rdev->data_offset;
 
                if (!bio_fits_rdev(align_bi) ||
                    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3893,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                        return 0;
                }
 
+               /* No reshape active, so we can trust rdev->data_offset */
+               align_bi->bi_sector += rdev->data_offset;
+
                spin_lock_irq(&conf->device_lock);
                wait_event_lock_irq(conf->wait_for_stripe,
                                    conf->quiesce == 0,
@@ -3971,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        struct stripe_head *sh;
        const int rw = bio_data_dir(bi);
        int remaining;
-       int plugged;
 
        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bi);
@@ -3990,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
 
-       plugged = mddev_check_plugged(mddev);
        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
                DEFINE_WAIT(w);
                int previous;
@@ -4092,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        if ((bi->bi_rw & REQ_SYNC) &&
                            !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
+                       mddev_check_plugged(mddev);
                        release_stripe(sh);
                } else {
                        /* cannot get stripe for read-ahead, just give-up */
@@ -4099,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        finish_wait(&conf->wait_for_overlap, &w);
                        break;
                }
-                       
        }
-       if (!plugged)
-               md_wakeup_thread(mddev->thread);
 
        spin_lock_irq(&conf->device_lock);
        remaining = raid5_dec_bi_phys_segments(bi);
@@ -4823,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
        int raid_disk, memory, max_disks;
        struct md_rdev *rdev;
        struct disk_info *disk;
+       char pers_name[6];
 
        if (mddev->new_level != 5
            && mddev->new_level != 4
@@ -4946,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
                       mdname(mddev), memory);
 
-       conf->thread = md_register_thread(raid5d, mddev, NULL);
+       sprintf(pers_name, "raid%d", mddev->new_level);
+       conf->thread = md_register_thread(raid5d, mddev, pers_name);
        if (!conf->thread) {
                printk(KERN_ERR
                       "md/raid:%s: couldn't allocate thread.\n",
@@ -5465,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        if (rdev->saved_raid_disk >= 0 &&
            rdev->saved_raid_disk >= first &&
            conf->disks[rdev->saved_raid_disk].rdev == NULL)
-               disk = rdev->saved_raid_disk;
-       else
-               disk = first;
-       for ( ; disk <= last ; disk++) {
+               first = rdev->saved_raid_disk;
+
+       for (disk = first; disk <= last; disk++) {
                p = conf->disks + disk;
                if (p->rdev == NULL) {
                        clear_bit(In_sync, &rdev->flags);
@@ -5477,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        if (rdev->saved_raid_disk != disk)
                                conf->fullsync = 1;
                        rcu_assign_pointer(p->rdev, rdev);
-                       break;
+                       goto out;
                }
+       }
+       for (disk = first; disk <= last; disk++) {
+               p = conf->disks + disk;
                if (test_bit(WantReplacement, &p->rdev->flags) &&
                    p->replacement == NULL) {
                        clear_bit(In_sync, &rdev->flags);
@@ -5490,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        break;
                }
        }
+out:
        print_raid5_conf(conf);
        return err;
 }
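
raid5_add_disk() previously mixed both checks into a single scan, so a slot wanting a replacement could be picked merely because it came first positionally, even when an empty slot existed later in the range. The rewrite makes two full passes: empty slots first, replacement candidates second. The pattern reduced to a compilable toy (all names hypothetical):

#include <stdio.h>

#define NR_SLOTS 8

struct slot {
        int occupied;
        int wants_replacement;
};

/* Two passes: prefer an empty slot anywhere in [first, last], and
 * only then fall back to a replacement candidate. */
static int pick_slot(const struct slot *s, int first, int last)
{
        int i;

        for (i = first; i <= last; i++)
                if (!s[i].occupied)
                        return i;
        for (i = first; i <= last; i++)
                if (s[i].wants_replacement)
                        return i;
        return -1;
}

int main(void)
{
        struct slot s[NR_SLOTS] = {
                { .occupied = 1, .wants_replacement = 1 },
                { .occupied = 1 },
        };

        /* Picks slot 2 (empty), not slot 0 (replacement candidate). */
        printf("chosen slot: %d\n", pick_slot(s, 0, NR_SLOTS - 1));
        return 0;
}
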
index 0741aded9eb057bbc2454a220bd6a19318e975fa..f2db8fca46a13d8b431d977df4efee8a61bc0959 100644 (file)
@@ -1804,18 +1804,16 @@ void gfar_configure_coalescing(struct gfar_private *priv,
        if (priv->mode == MQ_MG_MODE) {
                baddr = &regs->txic0;
                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
-                       if (likely(priv->tx_queue[i]->txcoalescing)) {
-                               gfar_write(baddr + i, 0);
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->tx_queue[i]->txcoalescing))
                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
-                       }
                }
 
                baddr = &regs->rxic0;
                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
-                       if (likely(priv->rx_queue[i]->rxcoalescing)) {
-                               gfar_write(baddr + i, 0);
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->rx_queue[i]->rxcoalescing))
                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
-                       }
                }
        }
 }
index 351a4097b2baec09c53ce45cace4c03c1c8dcb47..76edbc1be33b4d1151e72c24c1f621c830c3a039 100644 (file)
 #define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
 #define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
 #define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
 #define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
 
index 31d37a2b5ba818e1366945b630f8b187ec9fb15b..623e30b9964de29e091f77564ccd53ce8ba30876 100644 (file)
@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
  * @sk_buff: socket buffer with received data
  **/
 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
-                             __le16 csum, struct sk_buff *skb)
+                             struct sk_buff *skb)
 {
        u16 status = (u16)status_err;
        u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
        if (status & E1000_RXD_STAT_IXSM)
                return;
 
-       /* TCP/UDP checksum error bit is set */
-       if (errors & E1000_RXD_ERR_TCPE) {
+       /* TCP/UDP checksum error bit or IP checksum error bit is set */
+       if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
                /* let the stack verify checksum errors */
                adapter->hw_csum_err++;
                return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
                return;
 
        /* It must be a TCP or UDP packet with a valid checksum */
-       if (status & E1000_RXD_STAT_TCPCS) {
-               /* TCP checksum is good */
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else {
-               /*
-                * IP fragment with UDP payload
-                * Hardware complements the payload checksum, so we undo it
-                * and then put the value in host order for further stack use.
-                */
-               __sum16 sum = (__force __sum16)swab16((__force u16)csum);
-               skb->csum = csum_unfold(~sum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
-       }
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_good++;
 }
 
@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                skb_put(skb, length);
 
                /* Receive Checksum Offload */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1341,8 +1328,7 @@ copydone:
                total_rx_bytes += skb->len;
                total_rx_packets++;
 
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                        }
                }
 
-               /* Receive Checksum Offload XXX recompute due to CRC strip? */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               /* Receive Checksum Offload */
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 
        /* Enable Receive Checksum Offload for TCP and UDP */
        rxcsum = er32(RXCSUM);
-       if (adapter->netdev->features & NETIF_F_RXCSUM) {
+       if (adapter->netdev->features & NETIF_F_RXCSUM)
                rxcsum |= E1000_RXCSUM_TUOFL;
-
-               /*
-                * IPv4 payload checksum for UDP fragments must be
-                * used in conjunction with packet-split.
-                */
-               if (adapter->rx_ps_pages)
-                       rxcsum |= E1000_RXCSUM_IPPCSE;
-       } else {
+       else
                rxcsum &= ~E1000_RXCSUM_TUOFL;
-               /* no need to clear IPPCSE as it defaults to 0 */
-       }
        ew32(RXCSUM, rxcsum);
 
        if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* Jumbo frame support */
-       if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
-               if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
-                       e_err("Jumbo Frames not supported.\n");
-                       return -EINVAL;
-               }
-
-               /*
-                * IP payload checksum (enabled with jumbos/packet-split when
-                * Rx checksum is enabled) and generation of RSS hash is
-                * mutually exclusive in the hardware.
-                */
-               if ((netdev->features & NETIF_F_RXCSUM) &&
-                   (netdev->features & NETIF_F_RXHASH)) {
-                       e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled.  Disable one of the receive offload features before enabling jumbos.\n");
-                       return -EINVAL;
-               }
+       if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+           !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+               e_err("Jumbo Frames not supported.\n");
+               return -EINVAL;
        }
 
        /* Supported frame sizes */
@@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev,
                         NETIF_F_RXALL)))
                return 0;
 
-       /*
-        * IP payload checksum (enabled with jumbos/packet-split when Rx
-        * checksum is enabled) and generation of RSS hash is mutually
-        * exclusive in the hardware.
-        */
-       if (adapter->rx_ps_pages &&
-           (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
-               e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames.  Disable jumbos or enable only one of the receive offload features.\n");
-               return -EINVAL;
-       }
-
        if (changed & NETIF_F_RXFCS) {
                if (features & NETIF_F_RXFCS) {
                        adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
index 8ce67064b9c5802efe098f702486477b424c273e..90eef07943f4d50bbd027646e170abca5abfb1a3 100644 (file)
@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
-           ((ec->rx_coalesce_usecs > 3) &&
-            (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
-           (ec->rx_coalesce_usecs == 2))
-               return -EINVAL;
-
-       /* convert to rate of irq's per second */
-       if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+       if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
+            (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+               adapter->current_itr = ec->rx_coalesce_usecs << 2;
+               adapter->requested_itr = 1000000000 /
+                                       (adapter->current_itr * 256);
+       } else if ((ec->rx_coalesce_usecs == 3) ||
+                  (ec->rx_coalesce_usecs == 2)) {
                adapter->current_itr = IGBVF_START_ITR;
                adapter->requested_itr = ec->rx_coalesce_usecs;
-       } else {
-               adapter->current_itr = ec->rx_coalesce_usecs << 2;
+       } else if (ec->rx_coalesce_usecs == 0) {
+               /*
+                        * The user wants to turn off interrupt throttling
+                        * altogether, but due to HW limitations we can't do
+                        * that. Instead we set a very small value in EITR,
+                        * which allows ~967k interrupts per second while
+                        * still letting the adapter's internal clocking
+                        * function properly.
+                */
+               adapter->current_itr = 4;
                adapter->requested_itr = 1000000000 /
                                        (adapter->current_itr * 256);
-       }
+       } else
+               return -EINVAL;
 
        writel(adapter->current_itr,
               hw->hw_addr + adapter->rx_ring->itr_register);
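
The conversion in the rewritten igbvf_set_coalesce(): EITR counts 256 ns units, so the requested microseconds become register units via << 2 (x4, which approximates 1000/256), and the stored rate is 10^9 / (itr * 256) interrupts per second. A worked example:

#include <stdio.h>

int main(void)
{
        unsigned int usecs = 100;       /* user asks for 100 us between irqs */

        /* microseconds -> 256 ns register units, then the inverse
         * gives the resulting interrupt rate */
        unsigned int itr = usecs << 2;                      /* 400 units */
        unsigned int irqs_per_sec = 1000000000u / (itr * 256);

        printf("itr=%u units -> ~%u irqs/s\n", itr, irqs_per_sec);
        /* 100 us -> itr=400 -> ~9765 interrupts per second */
        return 0;
}
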
index d614c374ed9d2ada5b6923e288528b8201a09807..3b5c4571b55e3c922a4b6e5a94a0a4a8adcb2fce 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
index 3767a122586043512cd6ded88b562fd9b97df88f..b01960fcfbc91110ede59a5e18f9fac94a912bc3 100644 (file)
@@ -197,6 +197,10 @@ err:
 static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
+
+       /* can be called while disconnecting */
+       if (!dev)
+               return 0;
        return qmi_wwan_manage_power(dev, on);
 }
 
index c54b7d37bff159f1fc8a7482a4d8bdfe806b9beb..420d69b2674ceae1282fcc9d8cbbc38d8c5d00af 100644 (file)
@@ -143,6 +143,7 @@ struct ath_common {
        u32 keymax;
        DECLARE_BITMAP(keymap, ATH_KEYMAX);
        DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
+       DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
        enum ath_crypt_caps crypt_caps;
 
        unsigned int clockrate;
index 1c68e564f503197c9642ba1cae5e9633325fbe8c..995ca8e1302efc7562ff9905d8e91d00ffb25f74 100644 (file)
@@ -622,7 +622,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 
        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
                if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
-                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
+                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
                     !ah->is_pciexpress)) {
                        ah->config.serialize_regmode =
                                SER_REG_MODE_ON;
index e1fcc68124dc3bc78359cf72e2da459ddf9c4560..0735aeb3b26cefcd15c2847a1775718d36cecbc6 100644 (file)
@@ -695,9 +695,9 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
                        __skb_unlink(skb, &rx_edma->rx_fifo);
                        list_add_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_edma_buf_link(sc, qtype);
-               } else {
-                       bf = NULL;
                }
+
+               bf = NULL;
        }
 
        *dest = bf;
@@ -822,7 +822,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
         * descriptor does contain a valid key index. This has been observed
         * mostly with CCMP encryption.
         */
-       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
+       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+           !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
                rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
 
        if (!rx_stats->rs_datalen) {
index 0e81904956cf1c04fa37ad01e65282b55e2bf8f9..5c54aa43ca2d81b3936d917aeb68f7331aabb34b 100644 (file)
@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
                return -EIO;
 
        set_bit(idx, common->keymap);
+       if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+               set_bit(idx, common->ccmp_keymap);
+
        if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
                set_bit(idx + 64, common->keymap);
                set_bit(idx, common->tkip_keymap);
@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
                return;
 
        clear_bit(key->hw_key_idx, common->keymap);
+       clear_bit(key->hw_key_idx, common->ccmp_keymap);
        if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
                return;
 
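
ath now keeps a third bitmap alongside keymap/tkip_keymap so the RX path (the ath9k_rx_accept() hunk above) can tell whether a hardware "key miss" indication came from a slot that actually holds a CCMP key. A minimal sketch of the per-slot bitmap bookkeeping (kernel bitmap API; MAX_KEYS is a placeholder for the driver's key count):

#include <linux/bitops.h>
#include <linux/types.h>

#define MAX_KEYS 128

static DECLARE_BITMAP(keymap, MAX_KEYS);
static DECLARE_BITMAP(ccmp_keymap, MAX_KEYS);

/* Install: mark the slot used, and separately whether it is CCMP. */
static void key_install(unsigned int idx, bool is_ccmp)
{
        set_bit(idx, keymap);
        if (is_ccmp)
                set_bit(idx, ccmp_keymap);
}

/* RX side: only trust a hardware key-miss for known CCMP slots. */
static bool keymiss_is_real(unsigned int idx)
{
        return test_bit(idx, ccmp_keymap);
}

static void key_delete(unsigned int idx)
{
        clear_bit(idx, keymap);
        clear_bit(idx, ccmp_keymap);
}
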
index 3ee23134c02b9b96ee184a5999e9577bf6452511..013680332f075be85a9035bcec33eaec0c2e9285 100644 (file)
@@ -796,6 +796,18 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
        switch (op) {
        case ADD:
                ret = iwlagn_mac_sta_add(hw, vif, sta);
+               if (ret)
+                       break;
+               /*
+                * Clear the in-progress flag, the AP station entry was added
+                * but we'll initialize LQ only when we've associated (which
+                * would also clear the in-progress flag). This is necessary
+                * in case we never initialize LQ because association fails.
+                */
+               spin_lock_bh(&priv->sta_lock);
+               priv->stations[iwl_sta_id(sta)].used &=
+                       ~IWL_STA_UCODE_INPROGRESS;
+               spin_unlock_bh(&priv->sta_lock);
                break;
        case REMOVE:
                ret = iwlagn_mac_sta_remove(hw, vif, sta);
index 9c44088054dd90bd967626d8416e63c083225fc8..900ee129e825987dfb2dbe6b485bfa0bb40dbc74 100644 (file)
@@ -256,7 +256,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        else
                last_seq = priv->rx_seq[tid];
 
-       if (last_seq >= new_node->start_win)
+       if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
+           last_seq >= new_node->start_win)
                new_node->start_win = last_seq + 1;
 
        new_node->win_size = win_size;
@@ -596,5 +597,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
-       memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
+       mwifiex_reset_11n_rx_seq_num(priv);
 }
index f1bffebabc60a6ab1059276664bc3f3f71e129bb..6c9815a0f5d8b0d7aebcb5d6a9953a24819ad45a 100644 (file)
 
 #define ADDBA_RSP_STATUS_ACCEPT 0
 
+#define MWIFIEX_DEF_11N_RX_SEQ_NUM     0xffff
+
+static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
+{
+       memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
+}
+
 int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
                               u16 seqNum,
                               u16 tid, u8 *ta,
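
The MWIFIEX_DEF_11N_RX_SEQ_NUM sentinel works because memset() operates on bytes: filling the u16 array with 0xff bytes makes every entry 0xffff, a value no 12-bit 802.11 sequence number can take, so "nothing received yet" no longer collides with a genuine sequence number of 0 (which the old all-zero memset conflated). A quick userspace check:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEF_SEQ_NUM 0xffff
#define NR_TIDS 8

int main(void)
{
        uint16_t rx_seq[NR_TIDS];

        /* 0xff in every byte makes each 16-bit entry 0xffff */
        memset(rx_seq, 0xff, sizeof(rx_seq));

        printf("rx_seq[0] = 0x%04x (%s)\n", rx_seq[0],
               rx_seq[0] == DEF_SEQ_NUM ? "sentinel" : "real seq");
        return 0;
}
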
index ceb82cd749cc7ba1ca2054ae0b675802d2194190..383820a52beba0d62428614131adbe42def1e5db 100644 (file)
@@ -213,6 +213,7 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
                /* save assoc resp ie index after auto-indexing */
                *assoc_idx = *((u16 *)pos);
 
+       kfree(ap_custom_ie);
        return ret;
 }
 
index e0377473282f05fab01835578cb9b2579a40217f..fc8a9bfa1248305fababa37b59ca90110a57afaa 100644 (file)
@@ -978,10 +978,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
                dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
                adapter->event_cause = *(u32 *) skb->data;
 
-               skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);
-
                if ((skb->len > 0) && (skb->len  < MAX_EVENT_SIZE))
-                       memcpy(adapter->event_body, skb->data, skb->len);
+                       memcpy(adapter->event_body,
+                              skb->data + MWIFIEX_EVENT_HEADER_LEN,
+                              skb->len);
 
                /* event cause has been saved to adapter->event_cause */
                adapter->event_received = true;
index 4ace5a3dcd237c5aada48223e224a7f943ef2692..11e731f3581c2dbf4856d8676ca6ee66c12ccabe 100644 (file)
@@ -406,9 +406,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_UAP_STA_ASSOC:
-               skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
                memset(&sinfo, 0, sizeof(sinfo));
-               event = (struct mwifiex_assoc_event *)adapter->event_skb->data;
+               event = (struct mwifiex_assoc_event *)
+                       (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
                if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
                        len = -1;
 
@@ -433,9 +433,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                 GFP_KERNEL);
                break;
        case EVENT_UAP_STA_DEAUTH:
-               skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
-               cfg80211_del_sta(priv->netdev, adapter->event_skb->data,
-                                GFP_KERNEL);
+               cfg80211_del_sta(priv->netdev, adapter->event_body +
+                                MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
                break;
        case EVENT_UAP_BSS_IDLE:
                priv->media_connected = false;
index 49ebf20c56ebc0ac98dc8d5b25bc3a1164847020..22a5916564b84099576e1c554b37d1fcbcd420b2 100644 (file)
@@ -49,6 +49,7 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
        struct device *dev = adapter->dev;
        u32 recv_type;
        __le32 tmp;
+       int ret;
 
        if (adapter->hs_activated)
                mwifiex_process_hs_config(adapter);
@@ -69,16 +70,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                case MWIFIEX_USB_TYPE_CMD:
                        if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
                                dev_err(dev, "CMD: skb->len too large\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        } else if (!adapter->curr_cmd) {
                                dev_dbg(dev, "CMD: no curr_cmd\n");
                                if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
                                        mwifiex_process_sleep_confirm_resp(
                                                        adapter, skb->data,
                                                        skb->len);
-                                       return 0;
+                                       ret = 0;
+                                       goto exit_restore_skb;
                                }
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
 
                        adapter->curr_cmd->resp_skb = skb;
@@ -87,20 +91,22 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                case MWIFIEX_USB_TYPE_EVENT:
                        if (skb->len < sizeof(u32)) {
                                dev_err(dev, "EVENT: skb->len too small\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
                        skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
                        adapter->event_cause = le32_to_cpu(tmp);
-                       skb_pull(skb, sizeof(u32));
                        dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
 
                        if (skb->len > MAX_EVENT_SIZE) {
                                dev_err(dev, "EVENT: event body too large\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
 
-                       skb_copy_from_linear_data(skb, adapter->event_body,
-                                                 skb->len);
+                       memcpy(adapter->event_body, skb->data +
+                              MWIFIEX_EVENT_HEADER_LEN, skb->len);
+
                        adapter->event_received = true;
                        adapter->event_skb = skb;
                        break;
@@ -124,6 +130,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
        }
 
        return -EINPROGRESS;
+
+exit_restore_skb:
+       /* The buffer will be reused for further cmds/events */
+       skb_push(skb, INTF_HEADER_LEN);
+
+       return ret;
 }
 
 static void mwifiex_usb_rx_complete(struct urb *urb)
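
The new exit_restore_skb label undoes the earlier skb_pull(skb, INTF_HEADER_LEN) with a matching skb_push() before bailing out, because the same skb is resubmitted for the next command or event. A toy model of that pull/push symmetry (a simple cursor-style buffer, not the real skb API):

#include <stdio.h>
#include <stddef.h>

#define INTF_HEADER_LEN 4	/* hypothetical interface header size */

struct buf {
	unsigned char *data;
	size_t len;
};

/* skb_pull()-like: consume bytes from the front. */
static void buf_pull(struct buf *b, size_t n)
{
	b->data += n;
	b->len -= n;
}

/* skb_push()-like: give the header back so the very same buffer can be
 * resubmitted for the next command or event. */
static void buf_push(struct buf *b, size_t n)
{
	b->data -= n;
	b->len += n;
}

int main(void)
{
	unsigned char raw[32] = { 0 };
	struct buf b = { raw, sizeof(raw) };

	buf_pull(&b, INTF_HEADER_LEN);	/* parse the transport header */
	/* ... payload turns out to be bad: restore before bailing ... */
	buf_push(&b, INTF_HEADER_LEN);
	printf("len restored to %zu\n", b.len);
	return 0;
}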
index f3fc6551585780b08724bb8112457caafeef220e..3fa4d417699381225e853a56238e0d8506a2f99b 100644 (file)
@@ -404,6 +404,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
                priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
 
+               mwifiex_reset_11n_rx_seq_num(priv);
+
                atomic_set(&priv->wmm.tx_pkts_queued, 0);
                atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
        }
@@ -1221,6 +1223,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
 
        if (!ptr->is_11n_enabled ||
            mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
+           priv->wps.session_enable ||
            ((priv->sec_info.wpa_enabled ||
              priv->sec_info.wpa2_enabled) &&
             !priv->wpa_is_gtk_set)) {
index d228358e6a403b9c8eee56d63ce4991058975dc9..9970c2b1b19979dc3577472c7c21e27703a38a76 100644 (file)
@@ -301,9 +301,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+       {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
        {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+       {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
        /* HP - Lite-On ,8188CUS Slim Combo */
        {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
        {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
@@ -346,6 +348,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
        {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+       {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
        {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
        {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
        {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
index 54156b0b5c2d7defe6b42791d022d4c982fd945f..d7b907e67170348e70302e6a1cfe70642a8c6f7e 100644 (file)
@@ -1,7 +1,6 @@
 config WLCORE
        tristate "TI wlcore support"
        depends on WL_TI && GENERIC_HARDIRQS && MAC80211
-       depends on INET
        select FW_LOADER
        ---help---
          This module contains the main code for TI WLAN chips.  It abstracts
index d9bfd49b193503f61977a7a1c253e0a981ba07b1..eada3f4ef80100de1e0c3f8f2a236acff6e662a2 100644 (file)
@@ -511,6 +511,22 @@ out:
 }
 EXPORT_SYMBOL(of_find_node_with_property);
 
+static const struct of_device_id *of_match_compat(const struct of_device_id *matches,
+                                                 const char *compat)
+{
+       while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
+               const char *cp = matches->compatible;
+               int len = strlen(cp);
+
+               if (len > 0 && of_compat_cmp(compat, cp, len) == 0)
+                       return matches;
+
+               matches++;
+       }
+
+       return NULL;
+}
+
 /**
  * of_match_node - Tell if an device_node has a matching of_match structure
  *     @matches:       array of of device match structures to search in
@@ -521,9 +537,18 @@ EXPORT_SYMBOL(of_find_node_with_property);
 const struct of_device_id *of_match_node(const struct of_device_id *matches,
                                         const struct device_node *node)
 {
+       struct property *prop;
+       const char *cp;
+
        if (!matches)
                return NULL;
 
+       of_property_for_each_string(node, "compatible", prop, cp) {
+               const struct of_device_id *match = of_match_compat(matches, cp);
+               if (match)
+                       return match;
+       }
+
        while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
                int match = 1;
                if (matches->name[0])
@@ -532,10 +557,7 @@ const struct of_device_id *of_match_node(const struct of_device_id *matches,
                if (matches->type[0])
                        match &= node->type
                                && !strcmp(matches->type, node->type);
-               if (matches->compatible[0])
-                       match &= of_device_is_compatible(node,
-                                               matches->compatible);
-               if (match)
+               if (match && !matches->compatible[0])
                        return matches;
                matches++;
        }
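
The restructured of_match_node() now iterates the node's "compatible" strings in their listed order and scans the whole match table for each one, so the device tree's most-specific compatible wins even when a more generic entry appears earlier in the driver's table. A simplified userspace sketch of that ordering (plain strcmp instead of of_compat_cmp, all data hypothetical):

#include <stdio.h>
#include <string.h>

/* Trimmed stand-in for struct of_device_id (hypothetical layout). */
struct of_device_id {
	const char *compatible;
};

/* Scan the whole table for one compatible string. */
static const struct of_device_id *
match_compat(const struct of_device_id *m, const char *compat)
{
	for (; m->compatible; m++)
		if (!strcmp(m->compatible, compat))
			return m;
	return NULL;
}

/* Outer loop over the node's strings, inner loop over the table: the
 * first node string that matches anything wins, so the device tree's
 * ordering takes priority over the driver table's ordering. */
static const struct of_device_id *
match_node(const struct of_device_id *table,
	   const char *const node_compat[], int n)
{
	for (int i = 0; i < n; i++) {
		const struct of_device_id *m =
			match_compat(table, node_compat[i]);
		if (m)
			return m;
	}
	return NULL;
}

int main(void)
{
	static const struct of_device_id table[] = {
		{ "vendor,generic-phy" },
		{ "vendor,chip-v2-phy" },
		{ NULL },
	};
	const char *compat[] = { "vendor,chip-v2-phy", "vendor,generic-phy" };
	const struct of_device_id *m = match_node(table, compat, 2);

	printf("matched: %s\n", m ? m->compatible : "(none)");
	return 0;
}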
index 343ad29e211c66768491e325046ff0b58bcb15ec..3132ea068d95fada8b01e85a49cfd5859d1c4b6c 100644 (file)
@@ -462,4 +462,5 @@ int of_platform_populate(struct device_node *root,
        of_node_put(root);
        return rc;
 }
+EXPORT_SYMBOL_GPL(of_platform_populate);
 #endif /* CONFIG_OF_ADDRESS */
index 7301cdb4b2cb3cf6c5d8626b2275a85f88d4f55f..a383c18e74e86eebaa847d756e3493e7ca3c9bfd 100644 (file)
@@ -301,10 +301,14 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                goto out;
 
        eb = path->nodes[level];
-       if (!eb) {
-               WARN_ON(1);
-               ret = 1;
-               goto out;
+       while (!eb) {
+               if (!level) {
+                       WARN_ON(1);
+                       ret = 1;
+                       goto out;
+               }
+               level--;
+               eb = path->nodes[level];
        }
 
        ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
@@ -835,6 +839,7 @@ again:
                        }
                        ret = __add_delayed_refs(head, delayed_ref_seq,
                                                 &prefs_delayed);
+                       mutex_unlock(&head->mutex);
                        if (ret) {
                                spin_unlock(&delayed_refs->lock);
                                goto out;
@@ -928,8 +933,6 @@ again:
        }
 
 out:
-       if (head)
-               mutex_unlock(&head->mutex);
        btrfs_free_path(path);
        while (!list_empty(&prefs)) {
                ref = list_first_entry(&prefs, struct __prelim_ref, list);
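
The backref fix narrows the lock scope: head->mutex is now dropped immediately after __add_delayed_refs() instead of being held until the out: label, across work that can block. A minimal pthread sketch of the same narrowing (stand-in names):

#include <pthread.h>

static pthread_mutex_t head_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for __add_delayed_refs(): the only consumer of the
 * mutex-protected list. */
static int add_delayed_refs(void)
{
	return 0;
}

static int resolve_refs(void)
{
	int ret;

	pthread_mutex_lock(&head_mutex);
	ret = add_delayed_refs();
	pthread_mutex_unlock(&head_mutex);	/* drop it here, not at out: */

	/* ... the rest of the resolution may block; it no longer holds
	 * head_mutex while doing so ... */
	return ret;
}

int main(void)
{
	return resolve_refs();
}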
index 15cbc2bf4ff0ed11f8bb2e163bc1960c65b27fa7..8206b3900587efa23570b5b8f7f8071ab6e73156 100644 (file)
@@ -1024,11 +1024,18 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
                if (!looped && !tm)
                        return 0;
                /*
-                * we must have key remove operations in the log before the
-                * replace operation.
+                * if there are no tree operations for the oldest root, we simply
+                * return it. this should only happen if that (old) root is at
+                * level 0.
                 */
-               BUG_ON(!tm);
+               if (!tm)
+                       break;
 
+               /*
+                * if there's an operation that's not a root replacement, we
+                * found the oldest version of our root. normally, we'll find a
+                * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
+                */
                if (tm->op != MOD_LOG_ROOT_REPLACE)
                        break;
 
@@ -1087,11 +1094,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
                                                      tm->generation);
                        break;
                case MOD_LOG_KEY_ADD:
-                       if (tm->slot != n - 1) {
-                               o_dst = btrfs_node_key_ptr_offset(tm->slot);
-                               o_src = btrfs_node_key_ptr_offset(tm->slot + 1);
-                               memmove_extent_buffer(eb, o_dst, o_src, p_size);
-                       }
+                       /* if a move operation is needed it's in the log */
                        n--;
                        break;
                case MOD_LOG_MOVE_KEYS:
@@ -1192,16 +1195,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
        }
 
        tm = tree_mod_log_search(root->fs_info, logical, time_seq);
-       /*
-        * there was an item in the log when __tree_mod_log_oldest_root
-        * returned. this one must not go away, because the time_seq passed to
-        * us must be blocking its removal.
-        */
-       BUG_ON(!tm);
-
        if (old_root)
-               eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT,
-                                              root->nodesize);
+               eb = alloc_dummy_extent_buffer(logical, root->nodesize);
        else
                eb = btrfs_clone_extent_buffer(root->node);
        btrfs_tree_read_unlock(root->node);
@@ -1216,7 +1211,10 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
                btrfs_set_header_level(eb, old_root->level);
                btrfs_set_header_generation(eb, old_generation);
        }
-       __tree_mod_log_rewind(eb, time_seq, tm);
+       if (tm)
+               __tree_mod_log_rewind(eb, time_seq, tm);
+       else
+               WARN_ON(btrfs_header_level(eb) != 0);
        extent_buffer_get(eb);
 
        return eb;
@@ -2995,7 +2993,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 static void insert_ptr(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct btrfs_path *path,
                       struct btrfs_disk_key *key, u64 bytenr,
-                      int slot, int level, int tree_mod_log)
+                      int slot, int level)
 {
        struct extent_buffer *lower;
        int nritems;
@@ -3008,7 +3006,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
        BUG_ON(slot > nritems);
        BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
        if (slot != nritems) {
-               if (tree_mod_log && level)
+               if (level)
                        tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
                                             slot, nritems - slot);
                memmove_extent_buffer(lower,
@@ -3016,7 +3014,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
                              btrfs_node_key_ptr_offset(slot),
                              (nritems - slot) * sizeof(struct btrfs_key_ptr));
        }
-       if (tree_mod_log && level) {
+       if (level) {
                ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
                                              MOD_LOG_KEY_ADD);
                BUG_ON(ret < 0);
@@ -3104,7 +3102,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(split);
 
        insert_ptr(trans, root, path, &disk_key, split->start,
-                  path->slots[level + 1] + 1, level + 1, 1);
+                  path->slots[level + 1] + 1, level + 1);
 
        if (path->slots[level] >= mid) {
                path->slots[level] -= mid;
@@ -3641,7 +3639,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
        btrfs_set_header_nritems(l, mid);
        btrfs_item_key(right, &disk_key, 0);
        insert_ptr(trans, root, path, &disk_key, right->start,
-                  path->slots[1] + 1, 1, 0);
+                  path->slots[1] + 1, 1);
 
        btrfs_mark_buffer_dirty(right);
        btrfs_mark_buffer_dirty(l);
@@ -3848,7 +3846,7 @@ again:
                if (mid <= slot) {
                        btrfs_set_header_nritems(right, 0);
                        insert_ptr(trans, root, path, &disk_key, right->start,
-                                  path->slots[1] + 1, 1, 0);
+                                  path->slots[1] + 1, 1);
                        btrfs_tree_unlock(path->nodes[0]);
                        free_extent_buffer(path->nodes[0]);
                        path->nodes[0] = right;
@@ -3857,7 +3855,7 @@ again:
                } else {
                        btrfs_set_header_nritems(right, 0);
                        insert_ptr(trans, root, path, &disk_key, right->start,
-                                         path->slots[1], 1, 0);
+                                         path->slots[1], 1);
                        btrfs_tree_unlock(path->nodes[0]);
                        free_extent_buffer(path->nodes[0]);
                        path->nodes[0] = right;
@@ -5121,6 +5119,18 @@ again:
 
                if (!path->skip_locking) {
                        ret = btrfs_try_tree_read_lock(next);
+                       if (!ret && time_seq) {
+                               /*
+                                * If we don't get the lock, we may be racing
+                                * with push_leaf_left, which holds that lock
+                                * while waiting for the leaf we've currently
+                                * locked. To resolve the deadlock, we give up
+                                * our lock and cycle.
+                                */
+                               btrfs_release_path(path);
+                               cond_resched();
+                               goto again;
+                       }
                        if (!ret) {
                                btrfs_set_path_blocking(path);
                                btrfs_tree_read_lock(next);
index 7b845ff4af99fb643bd959e52dd6f202b7dee7ef..2936ca49b3b4af3a799f7585b74038d289ab8278 100644 (file)
@@ -2354,12 +2354,17 @@ retry_root_backup:
                                  BTRFS_CSUM_TREE_OBJECTID, csum_root);
        if (ret)
                goto recovery_tree_root;
-
        csum_root->track_dirty = 1;
 
        fs_info->generation = generation;
        fs_info->last_trans_committed = generation;
 
+       ret = btrfs_recover_balance(fs_info);
+       if (ret) {
+               printk(KERN_WARNING "btrfs: failed to recover balance\n");
+               goto fail_block_groups;
+       }
+
        ret = btrfs_init_dev_stats(fs_info);
        if (ret) {
                printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
@@ -2485,20 +2490,23 @@ retry_root_backup:
                goto fail_trans_kthread;
        }
 
-       if (!(sb->s_flags & MS_RDONLY)) {
-               down_read(&fs_info->cleanup_work_sem);
-               err = btrfs_orphan_cleanup(fs_info->fs_root);
-               if (!err)
-                       err = btrfs_orphan_cleanup(fs_info->tree_root);
-               up_read(&fs_info->cleanup_work_sem);
+       if (sb->s_flags & MS_RDONLY)
+               return 0;
 
-               if (!err)
-                       err = btrfs_recover_balance(fs_info->tree_root);
+       down_read(&fs_info->cleanup_work_sem);
+       if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
+           (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
+               up_read(&fs_info->cleanup_work_sem);
+               close_ctree(tree_root);
+               return ret;
+       }
+       up_read(&fs_info->cleanup_work_sem);
 
-               if (err) {
-                       close_ctree(tree_root);
-                       return err;
-               }
+       ret = btrfs_resume_balance_async(fs_info);
+       if (ret) {
+               printk(KERN_WARNING "btrfs: failed to resume balance\n");
+               close_ctree(tree_root);
+               return ret;
        }
 
        return 0;
index 4b5a1e1bdefbe095c239b464e55c9699e865175b..6e1d36702ff71c4fc5bde2733cbf166376b726d3 100644 (file)
@@ -2347,12 +2347,10 @@ next:
        return count;
 }
 
-
 static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
-                       unsigned long num_refs)
+                              unsigned long num_refs,
+                              struct list_head *first_seq)
 {
-       struct list_head *first_seq = delayed_refs->seq_head.next;
-
        spin_unlock(&delayed_refs->lock);
        pr_debug("waiting for more refs (num %ld, first %p)\n",
                 num_refs, first_seq);
@@ -2381,6 +2379,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_node *ref;
        struct list_head cluster;
+       struct list_head *first_seq = NULL;
        int ret;
        u64 delayed_start;
        int run_all = count == (unsigned long)-1;
@@ -2436,8 +2435,10 @@ again:
                                 */
                                consider_waiting = 1;
                                num_refs = delayed_refs->num_entries;
+                               first_seq = root->fs_info->tree_mod_seq_list.next;
                        } else {
-                               wait_for_more_refs(delayed_refs, num_refs);
+                               wait_for_more_refs(delayed_refs,
+                                                  num_refs, first_seq);
                                /*
                                 * after waiting, things have changed. we
                                 * dropped the lock and someone else might have
index aaa12c1eb3483a8371df48e5765b986c436451c4..01c21b6c6d43e44a28a4c862ca6532d6f8b02654 100644 (file)
@@ -3324,6 +3324,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
                             writepage_t writepage, void *data,
                             void (*flush_fn)(void *))
 {
+       struct inode *inode = mapping->host;
        int ret = 0;
        int done = 0;
        int nr_to_write_done = 0;
@@ -3334,6 +3335,18 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
        int scanned = 0;
        int tag;
 
+       /*
+        * We have to hold onto the inode so that ordered extents can do their
+        * work when the IO finishes.  The alternative to this is failing to add
+        * an ordered extent if the igrab() fails there, and that is a huge pain
+        * to deal with, so instead just hold onto the inode throughout the
+        * writepages operation.  If it fails here we are freeing up the inode
+        * anyway and we'd rather not waste our time writing out stuff that is
+        * going to be truncated anyway.
+        */
+       if (!igrab(inode))
+               return 0;
+
        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
@@ -3428,6 +3441,7 @@ retry:
                index = 0;
                goto retry;
        }
+       btrfs_add_delayed_iput(inode);
        return ret;
 }
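
igrab() only succeeds while the inode is still live, which is exactly what the comment above relies on: take the reference for the whole writepages pass, or skip work destined for truncation. A toy refcount model of that grab/put pattern (btrfs_add_delayed_iput() stands in for the plain put here; everything below is hypothetical):

#include <stdio.h>

/* Toy inode with an explicit reference count. */
struct toy_inode {
	int i_count;
};

/* igrab()-like: take a reference only while the object is still live. */
static int grab(struct toy_inode *inode)
{
	if (inode->i_count <= 0)
		return 0;
	inode->i_count++;
	return 1;
}

static void put(struct toy_inode *inode)
{
	inode->i_count--;
}

static int write_cache_pages(struct toy_inode *inode)
{
	/* Inode already being torn down: its data is about to be
	 * truncated, so writing it out would be wasted work. */
	if (!grab(inode))
		return 0;

	/* ... submit writeback; ordered-extent completion can now rely
	 * on the inode staying around ... */

	put(inode);	/* the patch uses a delayed iput here */
	return 0;
}

int main(void)
{
	struct toy_inode inode = { .i_count = 1 };

	return write_cache_pages(&inode);
}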
 
index 70dc8ca73e257bc3a1e7a96ea48009bff093af9a..9aa01ec2138d466f3d1726ccd6291d054c5e5c98 100644 (file)
@@ -1334,7 +1334,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
                                    loff_t *ppos, size_t count, size_t ocount)
 {
        struct file *file = iocb->ki_filp;
-       struct inode *inode = fdentry(file)->d_inode;
        struct iov_iter i;
        ssize_t written;
        ssize_t written_buffered;
@@ -1344,18 +1343,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
        written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
                                            count, ocount);
 
-       /*
-        * the generic O_DIRECT will update in-memory i_size after the
-        * DIOs are done.  But our endio handlers that update the on
-        * disk i_size never update past the in memory i_size.  So we
-        * need one more update here to catch any additions to the
-        * file
-        */
-       if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
-               btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
-               mark_inode_dirty(inode);
-       }
-
        if (written < 0 || written == count)
                return written;
 
index 81296c57405a5d53a27dba626a4d6201829bd578..6c4e2baa9290e16d06a4a8dfc5c3b05880cfeb27 100644 (file)
@@ -1543,29 +1543,26 @@ again:
        end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
 
        /*
-        * XXX - this can go away after a few releases.
-        *
-        * since the only user of btrfs_remove_free_space is the tree logging
-        * stuff, and the only way to test that is under crash conditions, we
-        * want to have this debug stuff here just in case somethings not
-        * working.  Search the bitmap for the space we are trying to use to
-        * make sure its actually there.  If its not there then we need to stop
-        * because something has gone wrong.
+        * We need to search for bits in this bitmap.  We could only cover some
+        * of the extent in this bitmap thanks to how we add space, so we need
+        * to search for as much of it as we can and clear that amount, and then
+        * go searching for the next bit.
         */
        search_start = *offset;
-       search_bytes = *bytes;
+       search_bytes = ctl->unit;
        search_bytes = min(search_bytes, end - search_start + 1);
        ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
        BUG_ON(ret < 0 || search_start != *offset);
 
-       if (*offset > bitmap_info->offset && *offset + *bytes > end) {
-               bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
-               *bytes -= end - *offset + 1;
-               *offset = end + 1;
-       } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
-               bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
-               *bytes = 0;
-       }
+       /* We may have found more bits than what we need */
+       search_bytes = min(search_bytes, *bytes);
+
+       /* Cannot clear past the end of the bitmap */
+       search_bytes = min(search_bytes, end - search_start + 1);
+
+       bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
+       *offset += search_bytes;
+       *bytes -= search_bytes;
 
        if (*bytes) {
                struct rb_node *next = rb_next(&bitmap_info->offset_index);
@@ -1596,7 +1593,7 @@ again:
                 * everything over again.
                 */
                search_start = *offset;
-               search_bytes = *bytes;
+               search_bytes = ctl->unit;
                ret = search_bitmap(ctl, bitmap_info, &search_start,
                                    &search_bytes);
                if (ret < 0 || search_start != *offset)
@@ -1879,12 +1876,14 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
-       struct btrfs_free_space *next_info = NULL;
        int ret = 0;
 
        spin_lock(&ctl->tree_lock);
 
 again:
+       if (!bytes)
+               goto out_lock;
+
        info = tree_search_offset(ctl, offset, 0, 0);
        if (!info) {
                /*
@@ -1905,88 +1904,48 @@ again:
                }
        }
 
-       if (info->bytes < bytes && rb_next(&info->offset_index)) {
-               u64 end;
-               next_info = rb_entry(rb_next(&info->offset_index),
-                                            struct btrfs_free_space,
-                                            offset_index);
-
-               if (next_info->bitmap)
-                       end = next_info->offset +
-                             BITS_PER_BITMAP * ctl->unit - 1;
-               else
-                       end = next_info->offset + next_info->bytes;
-
-               if (next_info->bytes < bytes ||
-                   next_info->offset > offset || offset > end) {
-                       printk(KERN_CRIT "Found free space at %llu, size %llu,"
-                             " trying to use %llu\n",
-                             (unsigned long long)info->offset,
-                             (unsigned long long)info->bytes,
-                             (unsigned long long)bytes);
-                       WARN_ON(1);
-                       ret = -EINVAL;
-                       goto out_lock;
-               }
-
-               info = next_info;
-       }
-
-       if (info->bytes == bytes) {
+       if (!info->bitmap) {
                unlink_free_space(ctl, info);
-               if (info->bitmap) {
-                       kfree(info->bitmap);
-                       ctl->total_bitmaps--;
-               }
-               kmem_cache_free(btrfs_free_space_cachep, info);
-               ret = 0;
-               goto out_lock;
-       }
-
-       if (!info->bitmap && info->offset == offset) {
-               unlink_free_space(ctl, info);
-               info->offset += bytes;
-               info->bytes -= bytes;
-               ret = link_free_space(ctl, info);
-               WARN_ON(ret);
-               goto out_lock;
-       }
+               if (offset == info->offset) {
+                       u64 to_free = min(bytes, info->bytes);
+
+                       info->bytes -= to_free;
+                       info->offset += to_free;
+                       if (info->bytes) {
+                               ret = link_free_space(ctl, info);
+                               WARN_ON(ret);
+                       } else {
+                               kmem_cache_free(btrfs_free_space_cachep, info);
+                       }
 
-       if (!info->bitmap && info->offset <= offset &&
-           info->offset + info->bytes >= offset + bytes) {
-               u64 old_start = info->offset;
-               /*
-                * we're freeing space in the middle of the info,
-                * this can happen during tree log replay
-                *
-                * first unlink the old info and then
-                * insert it again after the hole we're creating
-                */
-               unlink_free_space(ctl, info);
-               if (offset + bytes < info->offset + info->bytes) {
-                       u64 old_end = info->offset + info->bytes;
+                       offset += to_free;
+                       bytes -= to_free;
+                       goto again;
+               } else {
+                       u64 old_end = info->bytes + info->offset;
 
-                       info->offset = offset + bytes;
-                       info->bytes = old_end - info->offset;
+                       info->bytes = offset - info->offset;
                        ret = link_free_space(ctl, info);
                        WARN_ON(ret);
                        if (ret)
                                goto out_lock;
-               } else {
-                       /* the hole we're creating ends at the end
-                        * of the info struct, just free the info
-                        */
-                       kmem_cache_free(btrfs_free_space_cachep, info);
-               }
-               spin_unlock(&ctl->tree_lock);
 
-               /* step two, insert a new info struct to cover
-                * anything before the hole
-                */
-               ret = btrfs_add_free_space(block_group, old_start,
-                                          offset - old_start);
-               WARN_ON(ret); /* -ENOMEM */
-               goto out;
+                       /* Not enough bytes in this entry to satisfy us */
+                       if (old_end < offset + bytes) {
+                               bytes -= old_end - offset;
+                               offset = old_end;
+                               goto again;
+                       } else if (old_end == offset + bytes) {
+                               /* all done */
+                               goto out_lock;
+                       }
+                       spin_unlock(&ctl->tree_lock);
+
+                       ret = btrfs_add_free_space(block_group, offset + bytes,
+                                                  old_end - (offset + bytes));
+                       WARN_ON(ret);
+                       goto out;
+               }
        }
 
        ret = remove_from_bitmap(ctl, info, &offset, &bytes);
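
The rewritten removal path no longer assumes the requested range fits one entry: each iteration clamps how much it clears to both the remaining request and the current region, then loops with the leftover. One step of that clamping, in userspace form (unit handling simplified):

#include <stdio.h>
#include <stdint.h>

/* One iteration of the clamped removal loop: "found" is how much free
 * space the bitmap search returned, "region_end" the last byte the
 * current bitmap covers.  Clamp to the request and the region, clear,
 * advance, and let the caller loop while *bytes is non-zero. */
static void remove_step(uint64_t *offset, uint64_t *bytes,
			uint64_t found, uint64_t region_end)
{
	uint64_t step = found;

	if (step > *bytes)
		step = *bytes;				/* found more bits than needed */
	if (step > region_end - *offset + 1)
		step = region_end - *offset + 1;	/* stay inside this bitmap */

	/* bitmap_clear_bits(ctl, info, *offset, step) would run here */
	*offset += step;
	*bytes  -= step;
}

int main(void)
{
	uint64_t offset = 0, bytes = 10;

	remove_step(&offset, &bytes, 8, 3);	/* region covers [0..3] */
	printf("offset=%llu, remaining=%llu\n",
	       (unsigned long long)offset, (unsigned long long)bytes);
	return 0;
}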
index d8bb0dbc4941ca3f288119134df64eeb1eb679da..a7d1921ac76b8ee5d85d563c1ceed80f4a65a2dd 100644 (file)
@@ -3754,7 +3754,7 @@ void btrfs_evict_inode(struct inode *inode)
        btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
        if (root->fs_info->log_root_recovering) {
-               BUG_ON(!test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+               BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
                                 &BTRFS_I(inode)->runtime_flags));
                goto no_delete;
        }
@@ -5876,8 +5876,17 @@ map:
        bh_result->b_size = len;
        bh_result->b_bdev = em->bdev;
        set_buffer_mapped(bh_result);
-       if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
-               set_buffer_new(bh_result);
+       if (create) {
+               if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+                       set_buffer_new(bh_result);
+
+               /*
+                * Need to update the i_size under the extent lock so buffered
+                * readers will get the updated i_size when we unlock.
+                */
+               if (start + len > i_size_read(inode))
+                       i_size_write(inode, start + len);
+       }
 
        free_extent_map(em);
 
@@ -6360,12 +6369,48 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                 */
                ordered = btrfs_lookup_ordered_range(inode, lockstart,
                                                     lockend - lockstart + 1);
-               if (!ordered)
+
+               /*
+                * We need to make sure there are no buffered pages in this
+                * range either, we could have raced between the invalidate in
+                * generic_file_direct_write and locking the extent.  The
+                * invalidate needs to happen so that reads after a write do not
+                * get stale data.
+                */
+               if (!ordered && (!writing ||
+                   !test_range_bit(&BTRFS_I(inode)->io_tree,
+                                   lockstart, lockend, EXTENT_UPTODATE, 0,
+                                   cached_state)))
                        break;
+
                unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                                     &cached_state, GFP_NOFS);
-               btrfs_start_ordered_extent(inode, ordered, 1);
-               btrfs_put_ordered_extent(ordered);
+
+               if (ordered) {
+                       btrfs_start_ordered_extent(inode, ordered, 1);
+                       btrfs_put_ordered_extent(ordered);
+               } else {
+                       /* Flush and drop any pages mmap raced in */
+                       ret = filemap_write_and_wait_range(file->f_mapping,
+                                                          lockstart,
+                                                          lockend);
+                       if (ret)
+                               goto out;
+
+                       /*
+                        * If we found a page that couldn't be invalidated just
+                        * fall back to buffered.
+                        */
+                       ret = invalidate_inode_pages2_range(file->f_mapping,
+                                       lockstart >> PAGE_CACHE_SHIFT,
+                                       lockend >> PAGE_CACHE_SHIFT);
+                       if (ret) {
+                               if (ret == -EBUSY)
+                                       ret = 0;
+                               goto out;
+                       }
+               }
+
                cond_resched();
        }
 
index 497c530724cf6b7a50296d2c6660fef7f4066cb9..e440aa653c30d6f6c8ad1e7437bfa6e2b4d50799 100644 (file)
@@ -339,7 +339,7 @@ struct btrfs_ioctl_get_dev_stats {
 #define BTRFS_IOC_WAIT_SYNC  _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
 #define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
                                   struct btrfs_ioctl_vol_args_v2)
-#define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64)
+#define BTRFS_IOC_SUBVOL_GETFLAGS _IOR(BTRFS_IOCTL_MAGIC, 25, __u64)
 #define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
 #define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
                              struct btrfs_ioctl_scrub_args)
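
The _IOW to _IOR change is purely about the encoded direction bits: they are named from userspace's perspective, and SUBVOL_GETFLAGS copies data out to the caller. A self-contained illustration of the convention (a demo magic number reusing 0x94 for flavor, not a real device interface):

#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>

#define DEMO_IOCTL_MAGIC 0x94	/* same magic btrfs uses, for illustration */

/* Direction is named from userspace's point of view:
 *   _IOR - userspace reads (the kernel fills in the argument)
 *   _IOW - userspace writes (the kernel consumes the argument)
 * GETFLAGS returns data to the caller, so it must be _IOR; encoding it
 * as _IOW, as before the fix, misleads anything that decodes ioctl
 * numbers (strace, direction-checking wrappers, ...). */
#define DEMO_SUBVOL_GETFLAGS _IOR(DEMO_IOCTL_MAGIC, 25, uint64_t)
#define DEMO_SUBVOL_SETFLAGS _IOW(DEMO_IOCTL_MAGIC, 26, uint64_t)

int main(void)
{
	printf("GETFLAGS = %#lx\nSETFLAGS = %#lx\n",
	       (unsigned long)DEMO_SUBVOL_GETFLAGS,
	       (unsigned long)DEMO_SUBVOL_SETFLAGS);
	return 0;
}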
index 0eb9a4da069e6cb1e3c4294d4ad02e2ab92da666..e23991574fdf309e6ed95e23985fc5dd0af28e0f 100644 (file)
@@ -1187,6 +1187,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                if (ret)
                        goto restore;
 
+               ret = btrfs_resume_balance_async(fs_info);
+               if (ret)
+                       goto restore;
+
                sb->s_flags &= ~MS_RDONLY;
        }
 
index 2017d0ff511ca3304dad46e85045ad2ab28d4e75..8abeae4224f92dbd870877b76224515c304e979d 100644 (file)
@@ -690,6 +690,8 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
        kfree(name);
 
        iput(inode);
+
+       btrfs_run_delayed_items(trans, root);
        return ret;
 }
 
@@ -895,6 +897,7 @@ again:
                                ret = btrfs_unlink_inode(trans, root, dir,
                                                         inode, victim_name,
                                                         victim_name_len);
+                               btrfs_run_delayed_items(trans, root);
                        }
                        kfree(victim_name);
                        ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
@@ -1475,6 +1478,9 @@ again:
                        ret = btrfs_unlink_inode(trans, root, dir, inode,
                                                 name, name_len);
                        BUG_ON(ret);
+
+                       btrfs_run_delayed_items(trans, root);
+
                        kfree(name);
                        iput(inode);
 
index 8a3d2594b80726b3a4da08c5924cb51b8e28a0ba..ecaad40e7ef499fedb7667659d9efd955a661eaa 100644 (file)
@@ -2845,31 +2845,48 @@ out:
 
 static int balance_kthread(void *data)
 {
-       struct btrfs_balance_control *bctl =
-                       (struct btrfs_balance_control *)data;
-       struct btrfs_fs_info *fs_info = bctl->fs_info;
+       struct btrfs_fs_info *fs_info = data;
        int ret = 0;
 
        mutex_lock(&fs_info->volume_mutex);
        mutex_lock(&fs_info->balance_mutex);
 
-       set_balance_control(bctl);
-
-       if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
-               printk(KERN_INFO "btrfs: force skipping balance\n");
-       } else {
+       if (fs_info->balance_ctl) {
                printk(KERN_INFO "btrfs: continuing balance\n");
-               ret = btrfs_balance(bctl, NULL);
+               ret = btrfs_balance(fs_info->balance_ctl, NULL);
        }
 
        mutex_unlock(&fs_info->balance_mutex);
        mutex_unlock(&fs_info->volume_mutex);
+
        return ret;
 }
 
-int btrfs_recover_balance(struct btrfs_root *tree_root)
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
 {
        struct task_struct *tsk;
+
+       spin_lock(&fs_info->balance_lock);
+       if (!fs_info->balance_ctl) {
+               spin_unlock(&fs_info->balance_lock);
+               return 0;
+       }
+       spin_unlock(&fs_info->balance_lock);
+
+       if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
+               printk(KERN_INFO "btrfs: force skipping balance\n");
+               return 0;
+       }
+
+       tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
+       if (IS_ERR(tsk))
+               return PTR_ERR(tsk);
+
+       return 0;
+}
+
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
+{
        struct btrfs_balance_control *bctl;
        struct btrfs_balance_item *item;
        struct btrfs_disk_balance_args disk_bargs;
@@ -2882,29 +2899,30 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
        if (!path)
                return -ENOMEM;
 
-       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
-       if (!bctl) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
        key.objectid = BTRFS_BALANCE_OBJECTID;
        key.type = BTRFS_BALANCE_ITEM_KEY;
        key.offset = 0;
 
-       ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+       ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
        if (ret < 0)
-               goto out_bctl;
+               goto out;
        if (ret > 0) { /* ret = -ENOENT; */
                ret = 0;
-               goto out_bctl;
+               goto out;
+       }
+
+       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
+       if (!bctl) {
+               ret = -ENOMEM;
+               goto out;
        }
 
        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
 
-       bctl->fs_info = tree_root->fs_info;
-       bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
+       bctl->fs_info = fs_info;
+       bctl->flags = btrfs_balance_flags(leaf, item);
+       bctl->flags |= BTRFS_BALANCE_RESUME;
 
        btrfs_balance_data(leaf, item, &disk_bargs);
        btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
@@ -2913,14 +2931,13 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
        btrfs_balance_sys(leaf, item, &disk_bargs);
        btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
 
-       tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
-       if (IS_ERR(tsk))
-               ret = PTR_ERR(tsk);
-       else
-               goto out;
+       mutex_lock(&fs_info->volume_mutex);
+       mutex_lock(&fs_info->balance_mutex);
 
-out_bctl:
-       kfree(bctl);
+       set_balance_control(bctl);
+
+       mutex_unlock(&fs_info->balance_mutex);
+       mutex_unlock(&fs_info->volume_mutex);
 out:
        btrfs_free_path(path);
        return ret;
@@ -4061,16 +4078,18 @@ static void btrfs_end_bio(struct bio *bio, int err)
 
                        BUG_ON(stripe_index >= bbio->num_stripes);
                        dev = bbio->stripes[stripe_index].dev;
-                       if (bio->bi_rw & WRITE)
-                               btrfs_dev_stat_inc(dev,
-                                                  BTRFS_DEV_STAT_WRITE_ERRS);
-                       else
-                               btrfs_dev_stat_inc(dev,
-                                                  BTRFS_DEV_STAT_READ_ERRS);
-                       if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
-                               btrfs_dev_stat_inc(dev,
-                                                  BTRFS_DEV_STAT_FLUSH_ERRS);
-                       btrfs_dev_stat_print_on_error(dev);
+                       if (dev->bdev) {
+                               if (bio->bi_rw & WRITE)
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_WRITE_ERRS);
+                               else
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_READ_ERRS);
+                               if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_FLUSH_ERRS);
+                               btrfs_dev_stat_print_on_error(dev);
+                       }
                }
        }
 
index 74366f27a76bbc7272e4b41f2f92bf9520bca813..95f6637614db5932f8d52daba79943514a7ed8da 100644 (file)
@@ -281,7 +281,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
 int btrfs_init_new_device(struct btrfs_root *root, char *path);
 int btrfs_balance(struct btrfs_balance_control *bctl,
                  struct btrfs_ioctl_balance_args *bargs);
-int btrfs_recover_balance(struct btrfs_root *tree_root);
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
 int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
index c9f1318a3b820b363526576036c4894205552921..7bf08fa22ec9ab122bad5438a02bd78db6f1e885 100644 (file)
@@ -273,13 +273,16 @@ void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
  * Check if we need to grow the arrays holding pages and partial page
  * descriptions.
  */
-int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       unsigned int buffers = ACCESS_ONCE(pipe->buffers);
+
+       spd->nr_pages_max = buffers;
+       if (buffers <= PIPE_DEF_BUFFERS)
                return 0;
 
-       spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL);
-       spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL);
+       spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
+       spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
 
        if (spd->pages && spd->partial)
                return 0;
@@ -289,10 +292,9 @@ int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
        return -ENOMEM;
 }
 
-void splice_shrink_spd(struct pipe_inode_info *pipe,
-                      struct splice_pipe_desc *spd)
+void splice_shrink_spd(struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
                return;
 
        kfree(spd->pages);
@@ -315,6 +317,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -326,7 +329,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        index = *ppos >> PAGE_CACHE_SHIFT;
        loff = *ppos & ~PAGE_CACHE_MASK;
        req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       nr_pages = min(req_pages, pipe->buffers);
+       nr_pages = min(req_pages, spd.nr_pages_max);
 
        /*
         * Lookup the (hopefully) full range of pages we need.
@@ -497,7 +500,7 @@ fill_it:
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return error;
 }
 
@@ -598,6 +601,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &default_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -608,8 +612,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 
        res = -ENOMEM;
        vec = __vec;
-       if (pipe->buffers > PIPE_DEF_BUFFERS) {
-               vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL);
+       if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
+               vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
                if (!vec)
                        goto shrink_ret;
        }
@@ -617,7 +621,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        offset = *ppos & ~PAGE_CACHE_MASK;
        nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
-       for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) {
+       for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
                struct page *page;
 
                page = alloc_page(GFP_USER);
@@ -665,7 +669,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 shrink_ret:
        if (vec != __vec)
                kfree(vec);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return res;
 
 err:
@@ -1614,6 +1618,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &user_page_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1629,13 +1634,13 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
 
        spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
                                            spd.partial, false,
-                                           pipe->buffers);
+                                           spd.nr_pages_max);
        if (spd.nr_pages <= 0)
                ret = spd.nr_pages;
        else
                ret = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 }
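
The splice changes all hinge on one detail: pipe->buffers can be resized concurrently, so the descriptor now snapshots the capacity into spd.nr_pages_max at grow time and the shrink frees exactly that, never re-reading the pipe. A trimmed-down sketch of the snapshot (hypothetical structure, same shape as the patch):

#include <stdlib.h>

#define PIPE_DEF_BUFFERS 16

/* Trimmed splice_pipe_desc: just the fields the fix touches. */
struct spd {
	void **pages;
	unsigned int nr_pages_max;	/* capacity snapshot from grow time */
};

/* Snapshot the pipe's capacity once.  The pipe can be resized by
 * another task between grow and shrink, so shrink must free based on
 * this snapshot, never on a fresh read of pipe->buffers. */
static int grow_spd(unsigned int pipe_buffers, struct spd *spd)
{
	spd->nr_pages_max = pipe_buffers;
	if (pipe_buffers <= PIPE_DEF_BUFFERS)
		return 0;	/* the on-stack default arrays suffice */
	spd->pages = malloc(pipe_buffers * sizeof(*spd->pages));
	return spd->pages ? 0 : -1;
}

static void shrink_spd(struct spd *spd)
{
	if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
		return;		/* nothing was heap-allocated */
	free(spd->pages);
}

int main(void)
{
	struct spd spd = { 0 };

	if (grow_spd(64, &spd))
		return 1;
	/* ... fill up to spd.nr_pages_max pages and splice ... */
	shrink_spd(&spd);
	return 0;
}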
 
index ba43f408baa38907a8f63a64314e07bb622a6e7f..07954b05b86cc3ac9c4f72aa97584b551914631e 100644 (file)
@@ -827,7 +827,6 @@ extern bool __blk_end_request_err(struct request *rq, int error);
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
-extern void blk_abort_queue(struct request_queue *);
 extern void blk_unprep_request(struct request *);
 
 /*
index c4464356b35b0af21eaafe6cbd1d2d7b4f549814..96c158a37d3e5ead53765e4bfa2280a82c79be5e 100644 (file)
@@ -815,7 +815,7 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 #ifdef CONFIG_HAVE_KVM_EVENTFD
 
 void kvm_eventfd_init(struct kvm *kvm);
-int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
+int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_irqfd_release(struct kvm *kvm);
 void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
@@ -824,7 +824,7 @@ int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 
 static inline void kvm_eventfd_init(struct kvm *kvm) {}
 
-static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
        return -EINVAL;
 }
index 26e5b613deda9e63816b58c59f53f06d7dccd50c..09a545a7dfa39bcd7f736f446358d4c3b112aae4 100644 (file)
@@ -51,7 +51,8 @@ struct partial_page {
 struct splice_pipe_desc {
        struct page **pages;            /* page map */
        struct partial_page *partial;   /* pages[] may not be contig */
-       int nr_pages;                   /* number of pages in map */
+       int nr_pages;                   /* number of populated pages in map */
+       unsigned int nr_pages_max;      /* pages[] & partial[] arrays size */
        unsigned int flags;             /* splice flags */
        const struct pipe_buf_operations *ops;/* ops associated with output pipe */
        void (*spd_release)(struct splice_pipe_desc *, unsigned int);
@@ -85,9 +86,8 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
 /*
  * for dynamic pipe sizing
  */
-extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
-extern void splice_shrink_spd(struct pipe_inode_info *,
-                               struct splice_pipe_desc *);
+extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
+extern void splice_shrink_spd(struct splice_pipe_desc *);
 extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
 
 extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
index e4652fe5895863d956a52e436d10f1fc3724415b..fecdf31816f2fc6a834a060cfdd39158dcb30bc2 100644 (file)
@@ -912,6 +912,9 @@ struct sctp_transport {
                /* Is this structure kfree()able? */
                malloced:1;
 
+       /* Has this transport moved the ctsn since we last sacked */
+       __u32 sack_generation;
+
        struct flowi fl;
 
        /* This is the peer's IP address and port. */
@@ -1584,6 +1587,7 @@ struct sctp_association {
                 */
                __u8    sack_needed;     /* Do we need to sack the peer? */
                __u32   sack_cnt;
+               __u32   sack_generation;
 
                /* These are capabilities which our peer advertised.  */
                __u8    ecn_capable:1,      /* Can peer do ECN? */
index e7728bc14ccfde5bd85c3b08f990487af66ab072..2c5d2b4d5d1eb51542df26094700250ab6191070 100644 (file)
@@ -117,7 +117,8 @@ void sctp_tsnmap_free(struct sctp_tsnmap *map);
 int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
 
 /* Mark this TSN as seen.  */
-int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
+int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn,
+                    struct sctp_transport *trans);
 
 /* Mark this TSN and all lower as seen. */
 void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
index ab56a1764d4db8c6a5136cd3b636dc330648783b..e8cd2027abbd1e2e0ed6d432974816cef2845a42 100644 (file)
@@ -1235,6 +1235,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .nr_pages = 0,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .partial = partial,
                .flags = flags,
                .ops = &relay_pipe_buf_ops,
@@ -1302,8 +1303,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
                 ret += padding;
 
 out:
-       splice_shrink_spd(pipe, &spd);
-        return ret;
+       splice_shrink_spd(&spd);
+       return ret;
 }
 
 static ssize_t relay_file_splice_read(struct file *in,
index 49249c28690dbb21b6914e2907b16af56e1094c9..a7fa0702be1cd5b269ed6f15e9624f4b02968b11 100644 (file)
@@ -3609,6 +3609,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                .pages          = pages_def,
                .partial        = partial_def,
                .nr_pages       = 0, /* This gets updated below. */
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
@@ -3680,7 +3681,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
        ret = splice_to_pipe(pipe, &spd);
 out:
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 
 out_err:
@@ -4231,6 +4232,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages          = pages_def,
                .partial        = partial_def,
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &buffer_pipe_buf_ops,
                .spd_release    = buffer_spd_release,
@@ -4318,7 +4320,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        }
 
        ret = splice_to_pipe(pipe, &spd);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 out:
        return ret;
 }
index a15a466d0d1d14b21bd82de2bfd3df31654420db..4ce02e0673db612befb63def38911d2d2b0638af 100644 (file)
@@ -1594,6 +1594,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1682,7 +1683,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 
        if (error > 0) {
                *ppos += error;
index 6df214041a5e5bfe85b948b41ce56e15af514860..84f01ba81a34655becfd3be7af5fed76251d21ca 100644 (file)
@@ -1136,8 +1136,8 @@ void dev_load(struct net *net, const char *name)
                no_module = request_module("netdev-%s", name);
        if (no_module && capable(CAP_SYS_MODULE)) {
                if (!request_module("%s", name))
-                       pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
-                              name);
+                       pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+                               name);
        }
 }
 EXPORT_SYMBOL(dev_load);
index d78671e9d545be838f9ab5140c9ee07fb1309c26..46a3d23d259e51c17140798554874e929145d6bb 100644 (file)
@@ -1755,6 +1755,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = MAX_SKB_FRAGS,
                .flags = flags,
                .ops = &sock_pipe_buf_ops,
                .spd_release = sock_spd_release,
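
The splice hunks above all track the same tree-wide API change: struct splice_pipe_desc grew an .nr_pages_max field recording the capacity of the pages[]/partial[] arrays (distinct from .nr_pages, the count actually filled), and splice_shrink_spd() now takes only the descriptor, not the pipe. A minimal caller sketch against that interface; example_splice_read and example_pipe_buf_ops are hypothetical stand-ins, not code from this series:

    /* hypothetical buffer ops; real ones supply confirm/release/steal */
    static const struct pipe_buf_operations example_pipe_buf_ops;

    static ssize_t example_splice_read(struct file *in, loff_t *ppos,
                                       struct pipe_inode_info *pipe,
                                       size_t len, unsigned int flags)
    {
            struct page *pages[PIPE_DEF_BUFFERS];
            struct partial_page partial[PIPE_DEF_BUFFERS];
            struct splice_pipe_desc spd = {
                    .pages        = pages,
                    .partial      = partial,
                    .nr_pages     = 0,                /* pages actually filled */
                    .nr_pages_max = PIPE_DEF_BUFFERS, /* capacity of pages[] */
                    .flags        = flags,
                    .ops          = &example_pipe_buf_ops,
            };
            ssize_t ret;

            /* ... fill spd.pages[]/spd.partial[], bumping spd.nr_pages ... */

            ret = splice_to_pipe(pipe, &spd);
            splice_shrink_spd(&spd);  /* frees any over-allocation; no pipe arg */
            return ret;
    }
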
index 66e4fcdd1c6b4a6a07a7d26f3a66bc7e43183807..a4bb856de08f528ae564ab68fd2e83e6f779075f 100644 (file)
@@ -1342,7 +1342,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        u32 changed = 0;
-       u8 bssid[ETH_ALEN];
 
        ASSERT_MGD_MTX(ifmgd);
 
@@ -1354,10 +1353,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_stop_poll(sdata);
 
-       memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
-
        ifmgd->associated = NULL;
-       memset(ifmgd->bssid, 0, ETH_ALEN);
 
        /*
         * we need to commit the associated = NULL change because the
@@ -1377,7 +1373,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        netif_carrier_off(sdata->dev);
 
        mutex_lock(&local->sta_mtx);
-       sta = sta_info_get(sdata, bssid);
+       sta = sta_info_get(sdata, ifmgd->bssid);
        if (sta) {
                set_sta_flag(sta, WLAN_STA_BLOCK_BA);
                ieee80211_sta_tear_down_BA_sessions(sta, tx);
@@ -1386,13 +1382,16 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        /* deauthenticate/disassociate now */
        if (tx || frame_buf)
-               ieee80211_send_deauth_disassoc(sdata, bssid, stype, reason,
-                                              tx, frame_buf);
+               ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype,
+                                              reason, tx, frame_buf);
 
        /* flush out frame */
        if (tx)
                drv_flush(local, false);
 
+       /* clear bssid only after building the needed mgmt frames */
+       memset(ifmgd->bssid, 0, ETH_ALEN);
+
        /* remove AP and TDLS peers */
        sta_info_flush(local, sdata);
 
index 7bcecf73aafbf9dadb87a2c8a114e3eb92490131..965e6ec0adb6c12393ced183a44463a63e09a9ff 100644 (file)
@@ -2455,7 +2455,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
         * frames that we didn't handle, including returning unknown
         * ones. For all other modes we will return them to the sender,
         * setting the 0x80 bit in the action category, as required by
-        * 802.11-2007 7.3.1.11.
+        * 802.11-2012 9.24.4.
         * Newer versions of hostapd shall also use the management frame
         * registration mechanisms, but older ones still use cooked
         * monitor interfaces so push all frames there.
@@ -2465,6 +2465,9 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
             sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
                return RX_DROP_MONITOR;
 
+       if (is_multicast_ether_addr(mgmt->da))
+               return RX_DROP_MONITOR;
+
        /* do not return rejected action frames */
        if (mgmt->u.action.category & 0x80)
                return RX_DROP_UNUSABLE;
index 819c342f5b3012b6a4e37f60414fb835fc0041d8..9730882697aaedbab0beee66f0f12a654b97b63a 100644 (file)
@@ -639,6 +639,14 @@ find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
        return 0;
 }
 
+static int
+ip_set_none(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       return -EOPNOTSUPP;
+}
+
 static int
 ip_set_create(struct sock *ctnl, struct sk_buff *skb,
              const struct nlmsghdr *nlh,
@@ -1539,6 +1547,10 @@ nlmsg_failure:
 }
 
 static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+       [IPSET_CMD_NONE]        = {
+               .call           = ip_set_none,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+       },
        [IPSET_CMD_CREATE]      = {
                .call           = ip_set_create,
                .attr_count     = IPSET_ATTR_CMD_MAX,
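
The ip_set_none() stub looks odd, but IPSET_CMD_NONE is index 0 of the callback table: without an explicit entry there, a crafted IPSET_CMD_NONE message would apparently be dispatched through a NULL .call pointer inside nfnetlink. The general shape of the guard, as a standalone sketch (the names below are illustrative, not the nfnetlink internals):

    #include <errno.h>

    typedef int (*cmd_fn)(void *ctx);

    static int cmd_none(void *ctx)   { (void)ctx; return -EOPNOTSUPP; }
    static int cmd_create(void *ctx) { (void)ctx; return 0; }

    enum { CMD_NONE, CMD_CREATE, CMD_MAX };

    static const cmd_fn handlers[CMD_MAX] = {
            [CMD_NONE]   = cmd_none,   /* without this, handlers[0] is NULL */
            [CMD_CREATE] = cmd_create,
    };

    static int dispatch(unsigned int cmd, void *ctx)
    {
            if (cmd >= CMD_MAX || !handlers[cmd])
                    return -EINVAL;    /* defensive; the stub makes it moot */
            return handlers[cmd](ctx);
    }
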
index ee863943c8267286e4b16e52400dd40bf51b4f26..d5d3607ae7bcf5e9704bd189217115cf048aee4b 100644 (file)
@@ -38,30 +38,6 @@ struct iface_node {
 
 #define iface_data(n)  (rb_entry(n, struct iface_node, node)->iface)
 
-static inline long
-ifname_compare(const char *_a, const char *_b)
-{
-       const long *a = (const long *)_a;
-       const long *b = (const long *)_b;
-
-       BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
-       if (a[0] != b[0])
-               return a[0] - b[0];
-       if (IFNAMSIZ > sizeof(long)) {
-               if (a[1] != b[1])
-                       return a[1] - b[1];
-       }
-       if (IFNAMSIZ > 2 * sizeof(long)) {
-               if (a[2] != b[2])
-                       return a[2] - b[2];
-       }
-       if (IFNAMSIZ > 3 * sizeof(long)) {
-               if (a[3] != b[3])
-                       return a[3] - b[3];
-       }
-       return 0;
-}
-
 static void
 rbtree_destroy(struct rb_root *root)
 {
@@ -99,7 +75,7 @@ iface_test(struct rb_root *root, const char **iface)
 
        while (n) {
                const char *d = iface_data(n);
-               long res = ifname_compare(*iface, d);
+               int res = strcmp(*iface, d);
 
                if (res < 0)
                        n = n->rb_left;
@@ -121,7 +97,7 @@ iface_add(struct rb_root *root, const char **iface)
 
        while (*n) {
                char *ifname = iface_data(*n);
-               long res = ifname_compare(*iface, ifname);
+               int res = strcmp(*iface, ifname);
 
                p = *n;
                if (res < 0)
@@ -366,7 +342,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netiface4_elem data = { .cidr = HOST_MASK };
        u32 ip = 0, ip_to, last;
        u32 timeout = h->timeout;
-       char iface[IFNAMSIZ] = {};
+       char iface[IFNAMSIZ];
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -663,7 +639,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface6_elem data = { .cidr = HOST_MASK };
        u32 timeout = h->timeout;
-       char iface[IFNAMSIZ] = {};
+       char iface[IFNAMSIZ];
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
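
The hash_netiface hunks replace a word-at-a-time ifname_compare() with strcmp(). The word compare examined all IFNAMSIZ bytes, so the result could depend on whatever happened to sit past the terminating NUL; that is also why the char iface[IFNAMSIZ] = {} zero-initializations can be dropped, since strcmp() stops at the NUL. A standalone illustration of the difference:

    #include <stdio.h>
    #include <string.h>

    #define IFNAMSIZ 16

    int main(void)
    {
            char a[IFNAMSIZ] = "eth0";   /* fully zero-padded */
            char b[IFNAMSIZ];

            memset(b, 0xff, sizeof(b));  /* garbage past the NUL */
            memcpy(b, "eth0", 5);        /* same name, same terminator */

            /* a fixed-width compare sees the padding bytes ... */
            printf("memcmp: %d\n", memcmp(a, b, IFNAMSIZ));
            /* ... strcmp stops at the NUL and sees equal names */
            printf("strcmp: %d\n", strcmp(a, b));
            return 0;
    }
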
index dd811b8dd97c66a9da387a4e37073c47e8dc4407..d43e3c122f7bdc5b042b92ce74981149922faa78 100644 (file)
@@ -76,19 +76,19 @@ static void __ip_vs_del_service(struct ip_vs_service *svc);
 
 #ifdef CONFIG_IP_VS_IPV6
 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(struct net *net,
-                                   const struct in6_addr *addr)
+static bool __ip_vs_addr_is_local_v6(struct net *net,
+                                    const struct in6_addr *addr)
 {
-       struct rt6_info *rt;
        struct flowi6 fl6 = {
                .daddr = *addr,
        };
+       struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);
+       bool is_local;
 
-       rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
-       if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
-               return 1;
+       is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK);
 
-       return 0;
+       dst_release(dst);
+       return is_local;
 }
 #endif
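
The __ip_vs_addr_is_local_v6() rewrite fixes two things at once: ip6_route_output() never returns NULL (failures come back as a dst with ->error set, so the old NULL check tested the wrong thing), and the old code returned without dropping the route reference it had taken, leaking a refcount on every call. A kernel-style sketch of the contract being relied on; addr_routes_to_loopback is a hypothetical name:

    static bool addr_routes_to_loopback(struct net *net,
                                        const struct in6_addr *addr)
    {
            struct flowi6 fl6 = { .daddr = *addr };
            struct dst_entry *dst;
            bool loopback;

            dst = ip6_route_output(net, NULL, &fl6);  /* never NULL */
            loopback = !dst->error && dst->dev &&
                       (dst->dev->flags & IFF_LOOPBACK);
            dst_release(dst);  /* the lookup always took a reference */
            return loopback;
    }
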
 
index 3e797d1fcb94272ad96b9798e91a6e6467031379..791d56bbd74a7613e4b1d87ba74bcf2a8a135e46 100644 (file)
@@ -169,8 +169,10 @@ replay:
 
                err = nla_parse(cda, ss->cb[cb_id].attr_count,
                                attr, attrlen, ss->cb[cb_id].policy);
-               if (err < 0)
+               if (err < 0) {
+                       rcu_read_unlock();
                        return err;
+               }
 
                if (nc->call_rcu) {
                        err = nc->call_rcu(net->nfnl, skb, nlh,
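
The nfnetlink hunk restores lock balance: the surrounding code runs under rcu_read_lock(), so the early return on a parse failure previously left the RCU read lock held. The rule in a minimal hypothetical sketch, where do_parse() and do_dispatch() stand in for nla_parse() and the callback:

    /* hypothetical stand-ins for the nfnetlink internals */
    static int do_parse(struct sk_buff *skb);
    static int do_dispatch(struct sk_buff *skb);

    static int parse_and_dispatch(struct sk_buff *skb)
    {
            int err;

            rcu_read_lock();
            err = do_parse(skb);
            if (err < 0) {
                    rcu_read_unlock();  /* the previously missing unlock */
                    return err;
            }
            err = do_dispatch(skb);
            rcu_read_unlock();
            return err;
    }
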
index cb2646179e5f8b8d1836499579d1ccfd109fad60..2ab196a9f228ac873238c2a060685bacea62b87a 100644 (file)
@@ -106,7 +106,7 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
        nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
        data += 2;
 
-       nfca_poll->nfcid1_len = *data++;
+       nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE);
 
        pr_debug("sens_res 0x%x, nfcid1_len %d\n",
                 nfca_poll->sens_res, nfca_poll->nfcid1_len);
@@ -130,7 +130,7 @@ static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev,
                        struct rf_tech_specific_params_nfcb_poll *nfcb_poll,
                                                     __u8 *data)
 {
-       nfcb_poll->sensb_res_len = *data++;
+       nfcb_poll->sensb_res_len = min_t(__u8, *data++, NFC_SENSB_RES_MAXSIZE);
 
        pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len);
 
@@ -145,7 +145,7 @@ static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
                                                     __u8 *data)
 {
        nfcf_poll->bit_rate = *data++;
-       nfcf_poll->sensf_res_len = *data++;
+       nfcf_poll->sensf_res_len = min_t(__u8, *data++, NFC_SENSF_RES_MAXSIZE);
 
        pr_debug("bit_rate %d, sensf_res_len %d\n",
                 nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
@@ -331,7 +331,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
        switch (ntf->activation_rf_tech_and_mode) {
        case NCI_NFC_A_PASSIVE_POLL_MODE:
                nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
-               nfca_poll->rats_res_len = *data++;
+               nfca_poll->rats_res_len = min_t(__u8, *data++, 20);
                pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
                if (nfca_poll->rats_res_len > 0) {
                        memcpy(nfca_poll->rats_res,
@@ -341,7 +341,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
 
        case NCI_NFC_B_PASSIVE_POLL_MODE:
                nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep;
-               nfcb_poll->attrib_res_len = *data++;
+               nfcb_poll->attrib_res_len = min_t(__u8, *data++, 50);
                pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len);
                if (nfcb_poll->attrib_res_len > 0) {
                        memcpy(nfcb_poll->attrib_res,
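
Every NCI hunk applies the same hardening: a length byte read off the radio interface is clamped with min_t() to the size of the destination buffer before it is used in a later memcpy(), so a malicious or broken peer claiming up to 255 bytes can no longer overflow the fixed-size result arrays. The pattern in standalone form (sizes and names are illustrative):

    #include <stdint.h>
    #include <string.h>

    #define RES_MAXSIZE 10              /* capacity of the destination */

    struct parsed {
            uint8_t res_len;
            uint8_t res[RES_MAXSIZE];
    };

    static const uint8_t *parse_res(struct parsed *out, const uint8_t *data)
    {
            uint8_t len = *data++;      /* wire-controlled, 0..255 */

            /* clamp before use, as min_t() does above */
            out->res_len = len < RES_MAXSIZE ? len : RES_MAXSIZE;
            memcpy(out->res, data, out->res_len);
            return data + out->res_len;
    }
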
index ec1134c9e07fcd34fc5d6116e1a4aef9dedf6f23..8b8a6a2b2badaf61e9c71a174809ca989438668f 100644 (file)
@@ -54,7 +54,10 @@ static int rawsock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
 
-       pr_debug("sock=%p\n", sock);
+       pr_debug("sock=%p sk=%p\n", sock, sk);
+
+       if (!sk)
+               return 0;
 
        sock_orphan(sk);
        sock_put(sk);
index 5bc9ab161b373eadf51843ebaa77c527716a2122..b16517ee1aaf7cfad94978ed1181df6432da6f07 100644 (file)
@@ -271,6 +271,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         */
        asoc->peer.sack_needed = 1;
        asoc->peer.sack_cnt = 0;
+       asoc->peer.sack_generation = 1;
 
        /* Assume that the peer will tell us if he recognizes ASCONF
         * as part of INIT exchange.
index f1b7d4bb591e9b648865c4e096ef838e77678442..6ae47acaaec65ceb898e10e462454e667a8e739e 100644 (file)
@@ -248,6 +248,11 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
                /* If the SACK timer is running, we have a pending SACK */
                if (timer_pending(timer)) {
                        struct sctp_chunk *sack;
+
+                       if (pkt->transport->sack_generation !=
+                           pkt->transport->asoc->peer.sack_generation)
+                               return retval;
+
                        asoc->a_rwnd = asoc->rwnd;
                        sack = sctp_make_sack(asoc);
                        if (sack) {
index a85eeeb55dd0022e009895c53a6d8593929b7691..b6de71efb140c538a66e0a7901df2b4402a85bf4 100644 (file)
@@ -734,8 +734,10 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
        int len;
        __u32 ctsn;
        __u16 num_gabs, num_dup_tsns;
+       struct sctp_association *aptr = (struct sctp_association *)asoc;
        struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
        struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
+       struct sctp_transport *trans;
 
        memset(gabs, 0, sizeof(gabs));
        ctsn = sctp_tsnmap_get_ctsn(map);
@@ -805,6 +807,20 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
                sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
                                 sctp_tsnmap_get_dups(map));
 
+       /* Once we have generated a sack, check our sack generation: if it
+        * wrapped around to 0, reset every transport's generation to 0 and
+        * reset the association's generation to 1.
+        *
+        * The idea is that zero is never used as a valid generation for the
+        * association, so after a wrap event like this no transport will
+        * match until the next sack.
+        */
+       if (++aptr->peer.sack_generation == 0) {
+               list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+                                   transports)
+                       trans->sack_generation = 0;
+               aptr->peer.sack_generation = 1;
+       }
 nodata:
        return retval;
 }
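
Taken together, the sctp hunks in this series implement one scheme: the association bumps peer.sack_generation each time a SACK is built, sctp_tsnmap_mark() copies that generation onto the transport a new TSN arrived on, and sctp_packet_bundle_sack() only bundles a pending SACK onto a transport whose generation still matches, i.e. one that has actually received data since the last SACK was generated. The wrap handling above keeps zero reserved as a never-matches value for fresh transports. The state machine in a standalone sketch (names and types are illustrative):

    #include <stdint.h>
    #include <stdbool.h>

    struct assoc  { uint32_t sack_generation; };  /* initialized to 1 */
    struct transp { uint32_t sack_generation; };  /* initialized to 0 */

    static void data_received(struct assoc *a, struct transp *t)
    {
            t->sack_generation = a->sack_generation;  /* sctp_tsnmap_mark() */
    }

    static bool may_bundle_sack(const struct assoc *a, const struct transp *t)
    {
            /* sctp_packet_bundle_sack(): skip transports with no new data */
            return t->sack_generation == a->sack_generation;
    }

    static void sack_built(struct assoc *a, struct transp *all, int n)
    {
            if (++a->sack_generation == 0) {   /* wrap: 0 must stay invalid */
                    for (int i = 0; i < n; i++)
                            all[i].sack_generation = 0;
                    a->sack_generation = 1;
            }
    }
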
index c96d1a81cf4209f6d3ca9b6762fefa47e75867c3..8716da1a859221dc96d206ed517190e9e9cfa2ca 100644 (file)
@@ -1268,7 +1268,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                case SCTP_CMD_REPORT_TSN:
                        /* Record the arrival of a TSN.  */
                        error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
-                                                cmd->obj.u32);
+                                                cmd->obj.u32, NULL);
                        break;
 
                case SCTP_CMD_REPORT_FWDTSN:
index b026ba0c69922e09eb5c150298f650ea7bb3d1c4..1dcceb6e0ce6c2b9e5f6c0c3abf8075c3e75224c 100644 (file)
@@ -68,6 +68,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
        peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
        memset(&peer->saddr, 0, sizeof(union sctp_addr));
 
+       peer->sack_generation = 0;
+
        /* From 6.3.1 RTO Calculation:
         *
         * C1) Until an RTT measurement has been made for a packet sent to the
index f1e40cebc981ae7962bb9cd20ae84823b70d43cf..b5fb7c409023ff8adea81d41e68ace60781febae 100644 (file)
@@ -114,7 +114,8 @@ int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)
 
 
 /* Mark this TSN as seen.  */
-int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
+int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
+                    struct sctp_transport *trans)
 {
        u16 gap;
 
@@ -133,6 +134,9 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
                 */
                map->max_tsn_seen++;
                map->cumulative_tsn_ack_point++;
+               if (trans)
+                       trans->sack_generation =
+                               trans->asoc->peer.sack_generation;
                map->base_tsn++;
        } else {
                /* Either we already have a gap, or about to record a gap, so
index 8a84017834c211a840e83c1e39bebdbb001ec5c4..33d894776192205cd4b4a9573ccf70664c723b2d 100644 (file)
@@ -715,7 +715,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
         * can mark it as received so the tsn_map is updated correctly.
         */
        if (sctp_tsnmap_mark(&asoc->peer.tsn_map,
-                            ntohl(chunk->subh.data_hdr->tsn)))
+                            ntohl(chunk->subh.data_hdr->tsn),
+                            chunk->transport))
                goto fail_mark;
 
        /* First calculate the padding, so we don't inadvertently
index f2d1de7f2ffbd5a219759f55bdc546f2edbf19b0..f5a6a4f4faf721af4874538093cb003f4efc202c 100644 (file)
@@ -1051,7 +1051,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
        if (chunk && (freed >= needed)) {
                __u32 tsn;
                tsn = ntohl(chunk->subh.data_hdr->tsn);
-               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
+               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
                sctp_ulpq_tail_data(ulpq, chunk, gfp);
 
                sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
index 3efc9b12aef44016201b02eeafcc10140f17a240..860aeb349cb337bbccf4346d99120d4d1fd51c90 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mman.h>
 #include <linux/mount.h>
 #include <linux/personality.h>
+#include <linux/backing-dev.h>
 #include <net/flow.h>
 
 #define MAX_LSM_EVM_XATTR      2
index 5ccf10a4d5932b187bba679e40e8f424d4739551..aa4c25e0f3277fce38520c72c9759bfc90b9022d 100644 (file)
@@ -6688,6 +6688,31 @@ static const struct alc_model_fixup alc662_fixup_models[] = {
        {}
 };
 
+static void alc662_fill_coef(struct hda_codec *codec)
+{
+       int val, coef;
+
+       coef = alc_get_coef0(codec);
+
+       switch (codec->vendor_id) {
+       case 0x10ec0662:
+               if ((coef & 0x00f0) == 0x0030) {
+                       val = alc_read_coef_idx(codec, 0x4); /* EAPD Ctrl */
+                       alc_write_coef_idx(codec, 0x4, val & ~(1<<10));
+               }
+               break;
+       case 0x10ec0272:
+       case 0x10ec0273:
+       case 0x10ec0663:
+       case 0x10ec0665:
+       case 0x10ec0670:
+       case 0x10ec0671:
+       case 0x10ec0672:
+               val = alc_read_coef_idx(codec, 0xd); /* EAPD Ctrl */
+               alc_write_coef_idx(codec, 0xd, val | (1<<14));
+               break;
+       }
+}
 
 /*
  */
@@ -6707,6 +6732,9 @@ static int patch_alc662(struct hda_codec *codec)
 
        alc_fix_pll_init(codec, 0x20, 0x04, 15);
 
+       spec->init_hook = alc662_fill_coef;
+       alc662_fill_coef(codec);
+
        alc_pick_fixup(codec, alc662_fixup_models,
                       alc662_fixup_tbl, alc662_fixups);
        alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
index 64d2a4fa34b27d81ad84d0e64ed625e04e293b7a..e9b62b5ea637580ea7e8904ec2c68662ed1471fc 100644 (file)
@@ -935,9 +935,7 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream,
        }
 
 found:
-       data = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
-       snd_soc_write(codec, AIC3X_PLL_PROGA_REG,
-                     data | (pll_p << PLLP_SHIFT));
+       snd_soc_update_bits(codec, AIC3X_PLL_PROGA_REG, PLLP_MASK, pll_p);
        snd_soc_write(codec, AIC3X_OVRF_STATUS_AND_PLLR_REG,
                      pll_r << PLLR_SHIFT);
        snd_soc_write(codec, AIC3X_PLL_PROGB_REG, pll_j << PLLJ_SHIFT);
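
The aic3x change is more than a style cleanup: the old read-then-OR sequence never cleared the previous P-divider bits, so programming a new pll_p value could leave stale bits set in the field. snd_soc_update_bits() with the new PLLP_MASK clears the field before writing the new value (and skips the register write when nothing changes). The failure mode in standalone form:

    #include <stdio.h>
    #include <stdint.h>

    #define PLLP_SHIFT 0
    #define PLLP_MASK  7

    static uint8_t reg;     /* stands in for AIC3X_PLL_PROGA_REG */

    static void update_bits(uint8_t mask, uint8_t val)
    {
            /* the masked update snd_soc_update_bits() performs */
            reg = (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
            reg = 6 << PLLP_SHIFT;  /* old divider P = 6 (0b110) */
            /* buggy pattern: reg |= 1 << PLLP_SHIFT;  -> 0b111 = 7, not 1 */
            update_bits(PLLP_MASK, 1 << PLLP_SHIFT);
            printf("P field = %u\n", reg & PLLP_MASK);  /* prints 1 */
            return 0;
    }
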
index 6f097fb60683e7b5605b38e373c7f1c75f4158c9..08c7f6685ff0937a367824c7a731fd4fb8e48d72 100644 (file)
 
 /* PLL registers bitfields */
 #define PLLP_SHIFT             0
+#define PLLP_MASK              7
 #define PLLQ_SHIFT             3
 #define PLLR_SHIFT             0
 #define PLLJ_SHIFT             2
index acbdc5fde9236be0e43a5767bd0135eb92450e68..32682c1b7cdece413693db6f6487ae4df640da09 100644 (file)
@@ -1491,6 +1491,7 @@ static int wm2200_bclk_rates_dat[WM2200_NUM_BCLK_RATES] = {
 
 static int wm2200_bclk_rates_cd[WM2200_NUM_BCLK_RATES] = {
        5644800,
+       3763200,
        2882400,
        1881600,
        1411200,
index f59c1e8de7a2e62b5977d90ab992e1c2240e80f5..7d7e2aaffece234a81cef3f4181cc7ffe5bb14cb 100644 (file)
@@ -198,7 +198,7 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
 }
 
 static int
-kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 {
        struct kvm_irq_routing_table *irq_rt;
        struct _irqfd *irqfd, *tmp;
@@ -212,12 +212,12 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
                return -ENOMEM;
 
        irqfd->kvm = kvm;
-       irqfd->gsi = gsi;
+       irqfd->gsi = args->gsi;
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->inject, irqfd_inject);
        INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
 
-       file = eventfd_fget(fd);
+       file = eventfd_fget(args->fd);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
                goto fail;
@@ -298,19 +298,19 @@ kvm_eventfd_init(struct kvm *kvm)
  * shutdown any irqfd's that match fd+gsi
  */
 static int
-kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
 {
        struct _irqfd *irqfd, *tmp;
        struct eventfd_ctx *eventfd;
 
-       eventfd = eventfd_ctx_fdget(fd);
+       eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);
 
        spin_lock_irq(&kvm->irqfds.lock);
 
        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
-               if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
+               if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
                        /*
                         * This rcu_assign_pointer is needed for when
                         * another thread calls kvm_irq_routing_update before
@@ -338,12 +338,15 @@ kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
 }
 
 int
-kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
-       if (flags & KVM_IRQFD_FLAG_DEASSIGN)
-               return kvm_irqfd_deassign(kvm, fd, gsi);
+       if (args->flags & ~KVM_IRQFD_FLAG_DEASSIGN)
+               return -EINVAL;
+
+       if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
+               return kvm_irqfd_deassign(kvm, args);
 
-       return kvm_irqfd_assign(kvm, fd, gsi);
+       return kvm_irqfd_assign(kvm, args);
 }
 
 /*
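
Passing the whole struct kvm_irqfd through keeps the ioctl extensible, and the new flags check is what makes that safe: rejecting any bit outside KVM_IRQFD_FLAG_DEASSIGN today means userspace setting a future flag fails loudly on older kernels instead of being silently ignored. The rule as a standalone sketch (illustrative names):

    #include <errno.h>
    #include <stdint.h>

    #define FLAG_DEASSIGN  (1u << 0)
    #define KNOWN_FLAGS    FLAG_DEASSIGN

    struct irqfd_args { int32_t fd; uint32_t gsi; uint32_t flags; };

    static int do_assign(const struct irqfd_args *a)   { (void)a; return 0; }
    static int do_deassign(const struct irqfd_args *a) { (void)a; return 0; }

    static int irqfd_ioctl(const struct irqfd_args *args)
    {
            if (args->flags & ~KNOWN_FLAGS)
                    return -EINVAL;  /* unknown bits fail now, so they can
                                        safely gain meaning later */
            if (args->flags & FLAG_DEASSIGN)
                    return do_deassign(args);
            return do_assign(args);
    }
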
index 7e140683ff14d503a9714058cadd9dde7e4ffaf9..44ee7124b16dae1820ca1ca1c79f2a099979da12 100644 (file)
@@ -2047,7 +2047,7 @@ static long kvm_vm_ioctl(struct file *filp,
                r = -EFAULT;
                if (copy_from_user(&data, argp, sizeof data))
                        goto out;
-               r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
+               r = kvm_irqfd(kvm, &data);
                break;
        }
        case KVM_IOEVENTFD: {
@@ -2845,6 +2845,7 @@ void kvm_exit(void)
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        free_cpumask_var(cpus_hardware_enabled);
+       __free_page(fault_page);
        __free_page(hwpoison_page);
        __free_page(bad_page);
 }
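
The kvm_exit() one-liner plugs a leak: fault_page is allocated alongside hwpoison_page and bad_page during initialization, so module unload has to return all three. A kernel-style sketch of keeping init and exit symmetric (illustrative names, not the kvm code itself):

    static struct page *page_a, *page_b;

    static int __init example_init(void)
    {
            page_a = alloc_page(GFP_KERNEL);
            page_b = alloc_page(GFP_KERNEL);
            if (!page_a || !page_b)
                    goto fail;
            return 0;
    fail:
            if (page_b)
                    __free_page(page_b);
            if (page_a)
                    __free_page(page_a);
            return -ENOMEM;
    }

    static void __exit example_exit(void)
    {
            /* every page taken in init must be returned here */
            __free_page(page_b);
            __free_page(page_a);
    }
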