git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge tag 'acpi-4.12-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael...
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 2 Jun 2017 23:36:23 +0000 (16:36 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 2 Jun 2017 23:36:23 +0000 (16:36 -0700)
Pull ACPI fixes from Rafael Wysocki:
 "These revert one more problematic commit related to the ACPI-based
  handling of laptop lids and make some unuseful error messages coming
  from ACPICA go away.

  Specifics:

   - Revert one more commit related to the ACPI-based handling of laptop
     lids that changed the default behavior on laptops that booted with
     closed lids and introduced a regression there (Benjamin Tissoires).

   - Add a missing acpi_put_table() to the code implementing the
     /sys/firmware/acpi/tables interface to prevent a counter in the
     ACPICA core from overflowing (Dan Williams).

   - Drop error messages printed by ACPICA on acpi_get_table() reference
     counting mismatches as they need not indicate real errors at this
     point (Lv Zheng)"

* tag 'acpi-4.12-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPICA: Tables: Fix regression introduced by a too early mechanism enabling
  Revert "ACPI / button: Change default behavior to lid_init_state=open"
  ACPI / sysfs: fix acpi_get_table() leak / acpi-sysfs denial of service

158 files changed:
Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
arch/arm64/include/asm/acpi.h
arch/arm64/kernel/pci.c
arch/frv/include/asm/timex.h
arch/mips/kernel/process.c
arch/openrisc/kernel/process.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/process_32.c
arch/x86/kvm/lapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/pat.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/quirks.c
block/blk-cgroup.c
block/blk-core.c
block/blk-mq.c
block/blk-sysfs.c
block/blk.h
block/cfq-iosched.c
drivers/block/nbd.c
drivers/block/rbd.c
drivers/char/pcmcia/cm4040_cs.c
drivers/char/random.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/dma/ep93xx_dma.c
drivers/dma/mv_xor_v2.c
drivers/dma/pl330.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/sh/usb-dmac.c
drivers/firmware/dmi-id.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/efi-bgrt.c
drivers/firmware/efi/libstub/secureboot.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_lpe_audio.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lspcon.c
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fence.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_prime.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/hid/Kconfig
drivers/hid/hid-asus.c
drivers/hid/hid-core.c
drivers/hid/hid-elecom.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/wacom_wac.c
drivers/md/bitmap.c
drivers/md/dm-bufio.c
drivers/md/dm-integrity.c
drivers/md/dm-ioctl.c
drivers/md/dm-raid1.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-verity-target.c
drivers/md/dm.c
drivers/md/md-cluster.c
drivers/md/md.c
drivers/md/raid5-cache.c
drivers/md/raid5-ppl.c
drivers/md/raid5.c
drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
drivers/misc/sgi-xp/xp.h
drivers/misc/sgi-xp/xp_main.c
drivers/perf/arm_pmu_acpi.c
drivers/pinctrl/core.c
drivers/pinctrl/freescale/pinctrl-mxs.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/pinconf-generic.c
drivers/pinctrl/pinmux.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_erl0.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
fs/dax.c
fs/gfs2/log.c
fs/nfs/namespace.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfsxdr.c
fs/ntfs/namei.c
fs/ocfs2/export.c
fs/overlayfs/Kconfig
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/proc/base.c
fs/reiserfs/journal.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
include/drm/drm_dp_helper.h
include/linux/gfp.h
include/linux/gpio/machine.h
include/linux/jiffies.h
include/linux/memblock.h
include/linux/mm.h
include/linux/mmzone.h
include/linux/mod_devicetable.h
include/linux/pinctrl/pinconf-generic.h
include/linux/sunrpc/svc.h
include/target/iscsi/iscsi_target_core.h
kernel/livepatch/Kconfig
mm/gup.c
mm/hugetlb.c
mm/ksm.c
mm/memblock.c
mm/memory-failure.c
mm/memory.c
mm/mlock.c
mm/page_alloc.c
mm/slub.c
mm/util.c
scripts/gdb/linux/dmesg.py
sound/pci/hda/patch_realtek.c
sound/usb/mixer_us16x08.c
usr/Kconfig

index 71a3c134af1b25d58613699aa465d17cff2c23d0..f01d154090dab1b3a8a58fee6a599459392aa1f8 100644 (file)
@@ -247,7 +247,6 @@ bias-bus-hold               - latch weakly
 bias-pull-up           - pull up the pin
 bias-pull-down         - pull down the pin
 bias-pull-pin-default  - use pin-default pull state
-bi-directional         - pin supports simultaneous input/output operations
 drive-push-pull                - drive actively high and low
 drive-open-drain       - drive with open drain
 drive-open-source      - drive with open source
@@ -260,7 +259,6 @@ input-debounce              - debounce mode with debound time X
 power-source           - select between different power supplies
 low-power-enable       - enable low power mode
 low-power-disable      - disable low power mode
-output-enable          - enable output on pin regardless of output value
 output-low             - set the pin to output mode with low level
 output-high            - set the pin to output mode with high level
 slew-rate              - set the slew rate
index 0e99978da3f05013d145132950ad204a92e3e4b0..59cca1d6ec547270adbd56a4e2265b9f9fc34375 100644 (file)
@@ -23,9 +23,9 @@
 #define ACPI_MADT_GICC_LENGTH  \
        (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
 
-#define BAD_MADT_GICC_ENTRY(entry, end)                                                \
-       (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) ||       \
-        (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+#define BAD_MADT_GICC_ENTRY(entry, end)                                        \
+       (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
+       (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
 
 /* Basic configuration for ACPI */
 #ifdef CONFIG_ACPI
index 4f0e3ebfea4b4f6496a783bd172abc05e7ec1d4a..c7e3e6387a4910a6377d78e10caf98cc1c20243b 100644 (file)
@@ -191,8 +191,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
                return NULL;
 
        root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
-       if (!root_ops)
+       if (!root_ops) {
+               kfree(ri);
                return NULL;
+       }
 
        ri->cfg = pci_acpi_setup_ecam_mapping(root);
        if (!ri->cfg) {
index a89bddefdacf9194373a201c6c6c4cf8c4ac87c0..139093fab3260debefb4da2fbd33e37778784298 100644 (file)
@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
 #define vxtime_lock()          do {} while (0)
 #define vxtime_unlock()                do {} while (0)
 
+/* This attribute is used in include/linux/jiffies.h alongside with
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data      __attribute__((__section__(".data")))
+
 #endif
 
index 918d4c73e951d7815fc4063322c9ed8e493afb9f..5351e1f3950d158aaa5ff2590c32734862a79834 100644 (file)
@@ -120,7 +120,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs, *regs = current_pt_regs();
        unsigned long childksp;
-       p->set_child_tid = p->clear_child_tid = NULL;
 
        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
 
index f8da545854f979c33a7b3116d26d822caa46c494..106859ae27ffba114f9f4b0011151db0f65f98d4 100644 (file)
@@ -167,8 +167,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 
        top_of_kernel_stack = sp;
 
-       p->set_child_tid = p->clear_child_tid = NULL;
-
        /* Locate userspace context on stack... */
        sp -= STACK_FRAME_OVERHEAD;     /* redzone */
        sp -= sizeof(struct pt_regs);
index 45db4d2ebd0118e666c205185b9162d13e33316e..e9f4d762aa5b5cabde501f95045fad6c43fef54b 100644 (file)
@@ -320,7 +320,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 }
 
 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
 
 int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 {
@@ -338,8 +338,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
        if (!desc.mc)
                return -EINVAL;
 
-       ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
-                                desc.data, desc.size);
+       ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
        if (ret != UCODE_OK)
                return -EINVAL;
 
@@ -675,7 +674,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 }
 
 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
        enum ucode_state ret;
 
@@ -689,8 +688,8 @@ load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
 
 #ifdef CONFIG_X86_32
        /* save BSP's matching patch for early load */
-       if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
-               struct ucode_patch *p = find_patch(cpu);
+       if (save) {
+               struct ucode_patch *p = find_patch(0);
                if (p) {
                        memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
                        memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
@@ -722,11 +721,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 {
        char fw_name[36] = "amd-ucode/microcode_amd.bin";
        struct cpuinfo_x86 *c = &cpu_data(cpu);
+       bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
        enum ucode_state ret = UCODE_NFOUND;
        const struct firmware *fw;
 
        /* reload ucode container only on the boot cpu */
-       if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
+       if (!refresh_fw || !bsp)
                return UCODE_OK;
 
        if (c->x86 >= 0x15)
@@ -743,7 +743,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
                goto fw_release;
        }
 
-       ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);
+       ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
 
  fw_release:
        release_firmware(fw);
index ff40e74c9181f0e009b51909a0e76ce25c1c2cf3..ffeae818aa7a95ffd0395ae9af5fcedd9b599981 100644 (file)
@@ -78,7 +78,7 @@ void __show_regs(struct pt_regs *regs, int all)
 
        printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
        printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
-               smp_processor_id());
+               raw_smp_processor_id());
 
        printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->ax, regs->bx, regs->cx, regs->dx);
index c329d28949056e2d6ad4688e411c5644b81e4d68..d24c8742d9b0aa6df35d5e479e627ff008ea221f 100644 (file)
@@ -1495,8 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
 
 static void cancel_hv_timer(struct kvm_lapic *apic)
 {
+       preempt_disable();
        kvm_x86_ops->cancel_hv_timer(apic->vcpu);
        apic->lapic_timer.hv_timer_in_use = false;
+       preempt_enable();
 }
 
 static bool start_hv_timer(struct kvm_lapic *apic)
@@ -1934,7 +1936,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        for (i = 0; i < KVM_APIC_LVT_NUM; i++)
                kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
        apic_update_lvtt(apic);
-       if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
+       if (kvm_vcpu_is_reset_bsp(vcpu) &&
+           kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
                kvm_lapic_set_reg(apic, APIC_LVT0,
                             SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
        apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
index 183ddb235fb48658028433d451db75d554152c54..ba9891ac5c568f1798555bfa9dcbc421fff5ae2a 100644 (file)
@@ -1807,7 +1807,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
         * AMD's VMCB does not have an explicit unusable field, so emulate it
         * for cross vendor migration purposes by "not present"
         */
-       var->unusable = !var->present || (var->type == 0);
+       var->unusable = !var->present;
 
        switch (seg) {
        case VCPU_SREG_TR:
@@ -1840,6 +1840,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
                 */
                if (var->unusable)
                        var->db = 0;
+               /* This is symmetric with svm_set_segment() */
                var->dpl = to_svm(vcpu)->vmcb->save.cpl;
                break;
        }
@@ -1980,18 +1981,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
        s->base = var->base;
        s->limit = var->limit;
        s->selector = var->selector;
-       if (var->unusable)
-               s->attrib = 0;
-       else {
-               s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
-               s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
-               s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
-               s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
-               s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
-               s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
-               s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
-               s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
-       }
+       s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+       s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+       s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+       s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
+       s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+       s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+       s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+       s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
 
        /*
         * This is always accurate, except if SYSRET returned to a segment
@@ -2000,7 +1997,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
         * would entail passing the CPL to userspace and back.
         */
        if (seg == VCPU_SREG_SS)
-               svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+               /* This is symmetric with svm_get_segment() */
+               svm->vmcb->save.cpl = (var->dpl & 3);
 
        mark_dirty(svm->vmcb, VMCB_SEG);
 }
index 72f78396bc0960968161b66ccee00c42fa203fb7..9b4b5d6dcd34755acc0c09525ca93b3408ee4128 100644 (file)
@@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-/*
- * This function performs the various checks including
- * - if it's 4KB aligned
- * - No bits beyond the physical address width are set
- * - Returns 0 on success or else 1
- * (Intel SDM Section 30.3)
- */
-static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
-                                 gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 {
        gva_t gva;
-       gpa_t vmptr;
        struct x86_exception e;
-       struct page *page;
-       struct vcpu_vmx *vmx = to_vmx(vcpu);
-       int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
        if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
                        vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
                return 1;
 
-       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
-                               sizeof(vmptr), &e)) {
+       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+                               sizeof(*vmpointer), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
 
-       switch (exit_reason) {
-       case EXIT_REASON_VMON:
-               /*
-                * SDM 3: 24.11.5
-                * The first 4 bytes of VMXON region contain the supported
-                * VMCS revision identifier
-                *
-                * Note - IA32_VMX_BASIC[48] will never be 1
-                * for the nested case;
-                * which replaces physical address width with 32
-                *
-                */
-               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-                       nested_vmx_failInvalid(vcpu);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-
-               page = nested_get_page(vcpu, vmptr);
-               if (page == NULL) {
-                       nested_vmx_failInvalid(vcpu);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-                       kunmap(page);
-                       nested_release_page_clean(page);
-                       nested_vmx_failInvalid(vcpu);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               kunmap(page);
-               nested_release_page_clean(page);
-               vmx->nested.vmxon_ptr = vmptr;
-               break;
-       case EXIT_REASON_VMCLEAR:
-               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMCLEAR_INVALID_ADDRESS);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-
-               if (vmptr == vmx->nested.vmxon_ptr) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMCLEAR_VMXON_POINTER);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               break;
-       case EXIT_REASON_VMPTRLD:
-               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMPTRLD_INVALID_ADDRESS);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-
-               if (vmptr == vmx->nested.vmxon_ptr) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMPTRLD_VMXON_POINTER);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               break;
-       default:
-               return 1; /* shouldn't happen */
-       }
-
-       if (vmpointer)
-               *vmpointer = vmptr;
        return 0;
 }
 
@@ -7066,6 +6990,8 @@ out_msr_bitmap:
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
        int ret;
+       gpa_t vmptr;
+       struct page *page;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
                | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
                return 1;
        }
 
-       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
+       if (nested_vmx_get_vmptr(vcpu, &vmptr))
                return 1;
+
+       /*
+        * SDM 3: 24.11.5
+        * The first 4 bytes of VMXON region contain the supported
+        * VMCS revision identifier
+        *
+        * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
+        * which replaces physical address width with 32
+        */
+       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+               nested_vmx_failInvalid(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
+       page = nested_get_page(vcpu, vmptr);
+       if (page == NULL) {
+               nested_vmx_failInvalid(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+       if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+               kunmap(page);
+               nested_release_page_clean(page);
+               nested_vmx_failInvalid(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+       kunmap(page);
+       nested_release_page_clean(page);
+
+       vmx->nested.vmxon_ptr = vmptr;
        ret = enter_vmx_operation(vcpu);
        if (ret)
                return ret;
@@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
+       if (nested_vmx_get_vmptr(vcpu, &vmptr))
                return 1;
 
+       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+               nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
+       if (vmptr == vmx->nested.vmxon_ptr) {
+               nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
        if (vmptr == vmx->nested.current_vmptr)
                nested_release_vmcs12(vmx);
 
@@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
+       if (nested_vmx_get_vmptr(vcpu, &vmptr))
                return 1;
 
+       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+               nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
+       if (vmptr == vmx->nested.vmxon_ptr) {
+               nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
        if (vmx->nested.current_vmptr != vmptr) {
                struct vmcs12 *new_vmcs12;
                struct page *page;
@@ -7913,11 +7887,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 {
        unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        int cr = exit_qualification & 15;
-       int reg = (exit_qualification >> 8) & 15;
-       unsigned long val = kvm_register_readl(vcpu, reg);
+       int reg;
+       unsigned long val;
 
        switch ((exit_qualification >> 4) & 3) {
        case 0: /* mov to cr */
+               reg = (exit_qualification >> 8) & 15;
+               val = kvm_register_readl(vcpu, reg);
                switch (cr) {
                case 0:
                        if (vmcs12->cr0_guest_host_mask &
@@ -7972,6 +7948,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
                 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
                 * cr0. Other attempted changes are ignored, with no exit.
                 */
+               val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
                if (vmcs12->cr0_guest_host_mask & 0xe &
                    (val ^ vmcs12->cr0_read_shadow))
                        return true;
index 02363e37d4a61e8271d7fed0a8c534e9dd90f264..a2cd0997343c485051e849551b9fc9d904177fe0 100644 (file)
@@ -8394,10 +8394,13 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
        if (vcpu->arch.pv.pv_unhalted)
                return true;
 
-       if (atomic_read(&vcpu->arch.nmi_queued))
+       if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+           (vcpu->arch.nmi_pending &&
+            kvm_x86_ops->nmi_allowed(vcpu)))
                return true;
 
-       if (kvm_test_request(KVM_REQ_SMI, vcpu))
+       if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+           (vcpu->arch.smi_pending && !is_smm(vcpu)))
                return true;
 
        if (kvm_arch_interrupt_allowed(vcpu) &&
index 83a59a67757a77b46f7b7788074294a3a3c8a10c..9b78685b66e663c80ec3a68986a39024fd372b3c 100644 (file)
@@ -65,11 +65,9 @@ static int __init nopat(char *str)
 }
 early_param("nopat", nopat);
 
-static bool __read_mostly __pat_initialized = false;
-
 bool pat_enabled(void)
 {
-       return __pat_initialized;
+       return !!__pat_enabled;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
 
@@ -227,14 +225,13 @@ static void pat_bsp_init(u64 pat)
        }
 
        wrmsrl(MSR_IA32_CR_PAT, pat);
-       __pat_initialized = true;
 
        __init_cache_modes(pat);
 }
 
 static void pat_ap_init(u64 pat)
 {
-       if (!this_cpu_has(X86_FEATURE_PAT)) {
+       if (!boot_cpu_has(X86_FEATURE_PAT)) {
                /*
                 * If this happens we are on a secondary CPU, but switched to
                 * PAT on the boot CPU. We have no way to undo PAT.
@@ -309,7 +306,7 @@ void pat_init(void)
        u64 pat;
        struct cpuinfo_x86 *c = &boot_cpu_data;
 
-       if (!__pat_enabled) {
+       if (!pat_enabled()) {
                init_cache_modes();
                return;
        }
index 7e76a4d8304bc5add30e5f86d16e4f5b423a24f6..43b96f5f78ba8c9c323c5ae6090c19f3ae290ad0 100644 (file)
@@ -828,9 +828,11 @@ static void __init kexec_enter_virtual_mode(void)
 
        /*
         * We don't do virtual mode, since we don't do runtime services, on
-        * non-native EFI
+        * non-native EFI. With efi=old_map, we don't do runtime services in
+        * kexec kernel because in the initial boot something else might
+        * have been mapped at these virtual addresses.
         */
-       if (!efi_is_native()) {
+       if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
                efi_memmap_unmap();
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
index c488625c9712de4fe150d01df5c260e650967265..eb8dff15a7f63721d0f07845263da1af75670771 100644 (file)
@@ -71,11 +71,13 @@ static void __init early_code_mapping_set_exec(int executable)
 
 pgd_t * __init efi_call_phys_prolog(void)
 {
-       unsigned long vaddress;
-       pgd_t *save_pgd;
+       unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
+       pgd_t *save_pgd, *pgd_k, *pgd_efi;
+       p4d_t *p4d, *p4d_k, *p4d_efi;
+       pud_t *pud;
 
        int pgd;
-       int n_pgds;
+       int n_pgds, i, j;
 
        if (!efi_enabled(EFI_OLD_MEMMAP)) {
                save_pgd = (pgd_t *)read_cr3();
@@ -88,10 +90,49 @@ pgd_t * __init efi_call_phys_prolog(void)
        n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
        save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
 
+       /*
+        * Build 1:1 identity mapping for efi=old_map usage. Note that
+        * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
+        * it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical
+        * address X, the pud_index(X) != pud_index(__va(X)), we can only copy
+        * PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping.
+        * This means here we can only reuse the PMD tables of the direct mapping.
+        */
        for (pgd = 0; pgd < n_pgds; pgd++) {
-               save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
-               vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
-               set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+               addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
+               vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
+               pgd_efi = pgd_offset_k(addr_pgd);
+               save_pgd[pgd] = *pgd_efi;
+
+               p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
+               if (!p4d) {
+                       pr_err("Failed to allocate p4d table!\n");
+                       goto out;
+               }
+
+               for (i = 0; i < PTRS_PER_P4D; i++) {
+                       addr_p4d = addr_pgd + i * P4D_SIZE;
+                       p4d_efi = p4d + p4d_index(addr_p4d);
+
+                       pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
+                       if (!pud) {
+                               pr_err("Failed to allocate pud table!\n");
+                               goto out;
+                       }
+
+                       for (j = 0; j < PTRS_PER_PUD; j++) {
+                               addr_pud = addr_p4d + j * PUD_SIZE;
+
+                               if (addr_pud > (max_pfn << PAGE_SHIFT))
+                                       break;
+
+                               vaddr = (unsigned long)__va(addr_pud);
+
+                               pgd_k = pgd_offset_k(vaddr);
+                               p4d_k = p4d_offset(pgd_k, vaddr);
+                               pud[j] = *pud_offset(p4d_k, vaddr);
+                       }
+               }
        }
 out:
        __flush_tlb_all();
@@ -104,8 +145,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
        /*
         * After the lock is released, the original page table is restored.
         */
-       int pgd_idx;
+       int pgd_idx, i;
        int nr_pgds;
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
 
        if (!efi_enabled(EFI_OLD_MEMMAP)) {
                write_cr3((unsigned long)save_pgd);
@@ -115,9 +159,28 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
 
        nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
 
-       for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
+       for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
+               pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
 
+               if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+                       continue;
+
+               for (i = 0; i < PTRS_PER_P4D; i++) {
+                       p4d = p4d_offset(pgd,
+                                        pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
+
+                       if (!(p4d_val(*p4d) & _PAGE_PRESENT))
+                               continue;
+
+                       pud = (pud_t *)p4d_page_vaddr(*p4d);
+                       pud_free(&init_mm, pud);
+               }
+
+               p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+               p4d_free(&init_mm, p4d);
+       }
+
        kfree(save_pgd);
 
        __flush_tlb_all();
index 26615991d69cc8b024470c921aca227c2756e439..e0cf95a83f3fab918eb73715d188e631e02a1425 100644 (file)
@@ -360,6 +360,9 @@ void __init efi_free_boot_services(void)
                free_bootmem_late(start, size);
        }
 
+       if (!num_entries)
+               return;
+
        new_size = efi.memmap.desc_size * num_entries;
        new_phys = efi_memmap_alloc(num_entries);
        if (!new_phys) {
index 7c2947128f5813a677a0361eddcd277b5946d03e..0480892e97e501807a7f14f843eb549719f33c81 100644 (file)
@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg)
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
        if (blkg->blkcg != &blkcg_root)
-               blk_exit_rl(&blkg->rl);
+               blk_exit_rl(blkg->q, &blkg->rl);
 
        blkg_rwstat_exit(&blkg->stat_ios);
        blkg_rwstat_exit(&blkg->stat_bytes);
index c7068520794bd0ba060b905f850efaae6a8cbd36..a7421b772d0e0e3f4b8372fbc11aefd83763d30a 100644 (file)
@@ -648,13 +648,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
        if (!rl->rq_pool)
                return -ENOMEM;
 
+       if (rl != &q->root_rl)
+               WARN_ON_ONCE(!blk_get_queue(q));
+
        return 0;
 }
 
-void blk_exit_rl(struct request_list *rl)
+void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 {
-       if (rl->rq_pool)
+       if (rl->rq_pool) {
                mempool_destroy(rl->rq_pool);
+               if (rl != &q->root_rl)
+                       blk_put_queue(q);
+       }
 }
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
index f2224ffd225da8acb9b4775a19125f015cc6ab0a..1bcccedcc74f0b48f58363640acb1eae04704800 100644 (file)
@@ -2641,7 +2641,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
        return ret;
 }
 
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+                                                       int nr_hw_queues)
 {
        struct request_queue *q;
 
@@ -2665,6 +2666,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_unfreeze_queue(q);
 }
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+       mutex_lock(&set->tag_list_lock);
+       __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+       mutex_unlock(&set->tag_list_lock);
+}
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
 /* Enable polling stats and return whether they were already enabled. */
index 712b018e9f5496de893440646e7942e2c752b3b8..283da7fbe03408d9eef71ba3e1a4f863671d761b 100644 (file)
@@ -809,7 +809,7 @@ static void blk_release_queue(struct kobject *kobj)
 
        blk_free_queue_stats(q->stats);
 
-       blk_exit_rl(&q->root_rl);
+       blk_exit_rl(q, &q->root_rl);
 
        if (q->queue_tags)
                __blk_queue_free_tags(q);
index 2ed70228e44fc706e6efee71ca000e5e47433217..83c8e1100525f7dd80b9a75e83cd2f8efb0f5969 100644 (file)
@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
                gfp_t gfp_mask);
-void blk_exit_rl(struct request_list *rl);
+void blk_exit_rl(struct request_queue *q, struct request_list *rl);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                        struct bio *bio);
 void blk_queue_bypass_start(struct request_queue *q);
index da69b079725fbf62a407db76f7c5c430c52be3f9..b7e9c7feeab2acbd1a846d0c31285460aba076ec 100644 (file)
@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
 /*
- * offset from end of service tree
+ * offset from end of queue service tree for idle class
  */
 #define CFQ_IDLE_DELAY         (NSEC_PER_SEC / 5)
+/* offset from end of group service tree under time slice mode */
+#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service under IOPS mode */
+#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
 
 /*
  * below this threshold, we consider thinktime immediate
@@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
        cfqg->vfraction = max_t(unsigned, vfr, 1);
 }
 
+static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
+{
+       if (!iops_mode(cfqd))
+               return CFQ_SLICE_MODE_GROUP_DELAY;
+       else
+               return CFQ_IOPS_MODE_GROUP_DELAY;
+}
+
 static void
 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
@@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
        n = rb_last(&st->rb);
        if (n) {
                __cfqg = rb_entry_cfqg(n);
-               cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+               cfqg->vdisktime = __cfqg->vdisktime +
+                       cfq_get_cfqg_vdisktime_delay(cfqd);
        } else
                cfqg->vdisktime = st->min_vdisktime;
        cfq_group_service_tree_add(st, cfqg);
index 9a7bb2c2944772cad8124a965bacc17d0aa8f935..f3f191ba8ca4bbe6b7d87a7accc84bd648e4d718 100644 (file)
@@ -937,14 +937,6 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
        return -ENOSPC;
 }
 
-/* Reset all properties of an NBD device */
-static void nbd_reset(struct nbd_device *nbd)
-{
-       nbd->config = NULL;
-       nbd->tag_set.timeout = 0;
-       queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-}
-
 static void nbd_bdev_reset(struct block_device *bdev)
 {
        if (bdev->bd_openers > 1)
@@ -1029,7 +1021,11 @@ static void nbd_config_put(struct nbd_device *nbd)
                        }
                        kfree(config->socks);
                }
-               nbd_reset(nbd);
+               kfree(nbd->config);
+               nbd->config = NULL;
+
+               nbd->tag_set.timeout = 0;
+               queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
                mutex_unlock(&nbd->config_lock);
                nbd_put(nbd);
@@ -1483,7 +1479,6 @@ static int nbd_dev_add(int index)
        disk->fops = &nbd_fops;
        disk->private_data = nbd;
        sprintf(disk->disk_name, "nbd%d", index);
-       nbd_reset(nbd);
        add_disk(disk);
        nbd_total_devices++;
        return index;
index 454bf9c34882f33d673ccbaf0c8afa4f3ee18ad4..c16f74547804ccb957275f6d59b705b0ba35eb6b 100644 (file)
@@ -4023,6 +4023,7 @@ static void rbd_queue_workfn(struct work_struct *work)
 
        switch (req_op(rq)) {
        case REQ_OP_DISCARD:
+       case REQ_OP_WRITE_ZEROES:
                op_type = OBJ_OP_DISCARD;
                break;
        case REQ_OP_WRITE:
@@ -4420,6 +4421,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        q->limits.discard_granularity = segment_size;
        q->limits.discard_alignment = segment_size;
        blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
+       blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
 
        if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
                q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
index d4dbd8d8e524d7b712f9668cbee57c7d722440b2..382c864814d944c79e610eaa434bc356d12bd335 100644 (file)
@@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
 
        rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
        if (rc <= 0) {
-               DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+               DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
                DEBUGP(2, dev, "<- cm4040_write (failed)\n");
                if (rc == -ERESTARTSYS)
                        return rc;
@@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
        for (i = 0; i < bytes_to_write; i++) {
                rc = wait_for_bulk_out_ready(dev);
                if (rc <= 0) {
-                       DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
+                       DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
                               rc);
                        DEBUGP(2, dev, "<- cm4040_write (failed)\n");
                        if (rc == -ERESTARTSYS)
@@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
        rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
 
        if (rc <= 0) {
-               DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+               DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
                DEBUGP(2, dev, "<- cm4040_write (failed)\n");
                if (rc == -ERESTARTSYS)
                        return rc;
index 0ab0249189072befe3cee1b8696052727f360540..a561f0c2f428df6cbd80e0fe5bfd3c479a578e18 100644 (file)
@@ -1097,12 +1097,16 @@ static void add_interrupt_bench(cycles_t start)
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
        __u32 *ptr = (__u32 *) regs;
+       unsigned long flags;
 
        if (regs == NULL)
                return 0;
+       local_irq_save(flags);
        if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
                f->reg_idx = 0;
-       return *(ptr + f->reg_idx++);
+       ptr += f->reg_idx++;
+       local_irq_restore(flags);
+       return *ptr;
 }
 
 void add_interrupt_randomness(int irq, int irq_flags)
index 0e3f6496524d92c7c1717d8d2259684952d7acb8..26b643d57847de0fca4afc099f4bec49d6326be9 100644 (file)
@@ -2468,6 +2468,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
            list_empty(&cpufreq_policy_list)) {
                /* if all ->init() calls failed, unregister */
+               ret = -ENODEV;
                pr_debug("%s: No CPU initialized for driver %s\n", __func__,
                         driver_data->name);
                goto err_if_unreg;
index 1b9bcd76c60e334e72a31a3b6ff1a21410ac23ff..c2dd43f3f5d8a3092e6847f18d124f0631bdf065 100644 (file)
@@ -127,7 +127,12 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                return PTR_ERR(priv.cpu_clk);
        }
 
-       clk_prepare_enable(priv.cpu_clk);
+       err = clk_prepare_enable(priv.cpu_clk);
+       if (err) {
+               dev_err(priv.dev, "Unable to prepare cpuclk\n");
+               return err;
+       }
+
        kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
 
        priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
@@ -137,7 +142,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                goto out_cpu;
        }
 
-       clk_prepare_enable(priv.ddr_clk);
+       err = clk_prepare_enable(priv.ddr_clk);
+       if (err) {
+               dev_err(priv.dev, "Unable to prepare ddrclk\n");
+               goto out_cpu;
+       }
        kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
 
        priv.powersave_clk = of_clk_get_by_name(np, "powersave");
@@ -146,7 +155,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                err = PTR_ERR(priv.powersave_clk);
                goto out_ddr;
        }
-       clk_prepare_enable(priv.powersave_clk);
+       err = clk_prepare_enable(priv.powersave_clk);
+       if (err) {
+               dev_err(priv.dev, "Unable to prepare powersave clk\n");
+               goto out_ddr;
+       }
 
        of_node_put(np);
        np = NULL;
index d37e8dda807900fe9725aa153c20c1c2bc927a52..ec240592f5c8e7a450e2c26c20feece9d12dafad 100644 (file)
@@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
        struct dma_device       dma_dev;
        bool                    m2m;
        int                     (*hw_setup)(struct ep93xx_dma_chan *);
+       void                    (*hw_synchronize)(struct ep93xx_dma_chan *);
        void                    (*hw_shutdown)(struct ep93xx_dma_chan *);
        void                    (*hw_submit)(struct ep93xx_dma_chan *);
        int                     (*hw_interrupt)(struct ep93xx_dma_chan *);
@@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
                | M2P_CONTROL_ENABLE;
        m2p_set_control(edmac, control);
 
+       edmac->buffer = 0;
+
        return 0;
 }
 
@@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
        return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 }
 
-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
 {
+       unsigned long flags;
        u32 control;
 
+       spin_lock_irqsave(&edmac->lock, flags);
        control = readl(edmac->regs + M2P_CONTROL);
        control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
        m2p_set_control(edmac, control);
+       spin_unlock_irqrestore(&edmac->lock, flags);
 
        while (m2p_channel_state(edmac) >= M2P_STATE_ON)
-               cpu_relax();
+               schedule();
+}
 
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
        m2p_set_control(edmac, 0);
 
-       while (m2p_channel_state(edmac) == M2P_STATE_STALL)
-               cpu_relax();
+       while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
+               dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
 }
 
 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
@@ -1160,6 +1169,26 @@ fail:
        return NULL;
 }
 
+/**
+ * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ *
+ * Synchronizes the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe to
+ * free resources accessed from within the complete callbacks.
+ */
+static void ep93xx_dma_synchronize(struct dma_chan *chan)
+{
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+
+       if (edmac->edma->hw_synchronize)
+               edmac->edma->hw_synchronize(edmac);
+}
+
 /**
  * ep93xx_dma_terminate_all - terminate all transactions
  * @chan: channel
@@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
        dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
        dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
        dma_dev->device_config = ep93xx_dma_slave_config;
+       dma_dev->device_synchronize = ep93xx_dma_synchronize;
        dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
        dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
        dma_dev->device_tx_status = ep93xx_dma_tx_status;
@@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
        } else {
                dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
 
+               edma->hw_synchronize = m2p_hw_synchronize;
                edma->hw_setup = m2p_hw_setup;
                edma->hw_shutdown = m2p_hw_shutdown;
                edma->hw_submit = m2p_hw_submit;
index a28a01fcba674dc569e4d49ca6fd50def5a58645..f3e211f8f6c58c00080703f11b25937bb36dab39 100644 (file)
@@ -161,6 +161,7 @@ struct mv_xor_v2_device {
        struct mv_xor_v2_sw_desc *sw_desq;
        int desc_size;
        unsigned int npendings;
+       unsigned int hw_queue_idx;
 };
 
 /**
@@ -213,18 +214,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
        }
 }
 
-/*
- * Return the next available index in the DESQ.
- */
-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
-{
-       /* read the index for the next available descriptor in the DESQ */
-       u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
-
-       return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
-               & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
-}
-
 /*
  * notify the engine of new descriptors, and update the available index.
  */
@@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
        return MV_XOR_V2_EXT_DESC_SIZE;
 }
 
-/*
- * Set the IMSG threshold
- */
-static inline
-void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
-{
-       u32 reg;
-
-       reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-
-       reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-       reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-
-       writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-}
-
 static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 {
        struct mv_xor_v2_device *xor_dev = data;
@@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
        if (!ndescs)
                return IRQ_NONE;
 
-       /*
-        * Update IMSG threshold, to disable new IMSG interrupts until
-        * end of the tasklet
-        */
-       mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
-
        /* schedule a tasklet to handle descriptors callbacks */
        tasklet_schedule(&xor_dev->irq_tasklet);
 
@@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 static dma_cookie_t
 mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-       int desq_ptr;
        void *dest_hw_desc;
        dma_cookie_t cookie;
        struct mv_xor_v2_sw_desc *sw_desc =
@@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
        spin_lock_bh(&xor_dev->lock);
        cookie = dma_cookie_assign(tx);
 
-       /* get the next available slot in the DESQ */
-       desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
-
        /* copy the HW descriptor from the SW descriptor to the DESQ */
-       dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+       dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
 
        memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
 
        xor_dev->npendings++;
+       xor_dev->hw_queue_idx++;
+       if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
+               xor_dev->hw_queue_idx = 0;
 
        spin_unlock_bh(&xor_dev->lock);
 
@@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc     *
 mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 {
        struct mv_xor_v2_sw_desc *sw_desc;
+       bool found = false;
 
        /* Lock the channel */
        spin_lock_bh(&xor_dev->lock);
@@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
                return NULL;
        }
 
-       /* get a free SW descriptor from the SW DESQ */
-       sw_desc = list_first_entry(&xor_dev->free_sw_desc,
-                                  struct mv_xor_v2_sw_desc, free_list);
+       list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
+               if (async_tx_test_ack(&sw_desc->async_tx)) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found) {
+               spin_unlock_bh(&xor_dev->lock);
+               return NULL;
+       }
+
        list_del(&sw_desc->free_list);
 
        /* Release the channel */
        spin_unlock_bh(&xor_dev->lock);
 
-       /* set the async tx descriptor */
-       dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
-       sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
-       async_tx_ack(&sw_desc->async_tx);
-
        return sw_desc;
 }
 
@@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                __func__, len, &src, &dest, flags);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        sw_desc->async_tx.flags = flags;
 
@@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
                __func__, src_cnt, len, &dest, flags);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        sw_desc->async_tx.flags = flags;
 
@@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
                container_of(chan, struct mv_xor_v2_device, dmachan);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;
@@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
 {
        struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
        int pending_ptr, num_of_pending, i;
-       struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
        struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
 
        dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
@@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
        /* get the pending descriptors parameters */
        num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
 
-       /* next HW descriptor */
-       next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
-
        /* loop over free descriptors */
        for (i = 0; i < num_of_pending; i++) {
-
-               if (pending_ptr > MV_XOR_V2_DESC_NUM)
-                       pending_ptr = 0;
-
-               if (next_pending_sw_desc != NULL)
-                       next_pending_hw_desc++;
+               struct mv_xor_v2_descriptor *next_pending_hw_desc =
+                       xor_dev->hw_desq_virt + pending_ptr;
 
                /* get the SW descriptor related to the HW descriptor */
                next_pending_sw_desc =
@@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data)
 
                /* increment the next descriptor */
                pending_ptr++;
+               if (pending_ptr >= MV_XOR_V2_DESC_NUM)
+                       pending_ptr = 0;
        }
 
        if (num_of_pending != 0) {
                /* free the descriptores */
                mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
        }
-
-       /* Update IMSG threshold, to enable new IMSG interrupts */
-       mv_xor_v2_set_imsg_thrd(xor_dev, 0);
 }
 
 /*
@@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
        writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
 
-       /* enable the DMA engine */
-       writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
-
        /*
         * This is a temporary solution, until we activate the
         * SMMU. Set the attributes for reading & writing data buffers
@@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
        reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
        writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
 
+       /* enable the DMA engine */
+       writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
        return 0;
 }
 
@@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, xor_dev);
 
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+       if (ret)
+               return ret;
+
        xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
@@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
        /* add all SW descriptors to the free list */
        for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
-               xor_dev->sw_desq[i].idx = i;
-               list_add(&xor_dev->sw_desq[i].free_list,
+               struct mv_xor_v2_sw_desc *sw_desc =
+                       xor_dev->sw_desq + i;
+               sw_desc->idx = i;
+               dma_async_tx_descriptor_init(&sw_desc->async_tx,
+                                            &xor_dev->dmachan);
+               sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+               async_tx_ack(&sw_desc->async_tx);
+
+               list_add(&sw_desc->free_list,
                         &xor_dev->free_sw_desc);
        }
 
index 8b0da7fa520d27ac514228130c354f121a12b848..e90a7a0d760af6d031fa465208e88f2dd6f056b4 100644 (file)
@@ -3008,7 +3008,8 @@ static int pl330_remove(struct amba_device *adev)
 
        for (i = 0; i < AMBA_NR_IRQS; i++) {
                irq = adev->irq[i];
-               devm_free_irq(&adev->dev, irq, pl330);
+               if (irq)
+                       devm_free_irq(&adev->dev, irq, pl330);
        }
 
        dma_async_device_unregister(&pl330->ddma);
index db41795fe42ae6ed355de41f12b5c90ea661bde4..bd261c9e9664b6ac951939641091bc0bb7466380 100644 (file)
@@ -1287,6 +1287,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
        if (desc->hwdescs.use) {
                dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
                        RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+               if (dptr == 0)
+                       dptr = desc->nchunks;
+               dptr--;
                WARN_ON(dptr >= desc->nchunks);
        } else {
                running = desc->running;
index 72c649713aceecd75a20957522792702fca1696a..31a145154e9f26a8562e51223cffa50e65352075 100644 (file)
@@ -117,7 +117,7 @@ struct usb_dmac {
 #define USB_DMASWR                     0x0008
 #define USB_DMASWR_SWR                 (1 << 0)
 #define USB_DMAOR                      0x0060
-#define USB_DMAOR_AE                   (1 << 2)
+#define USB_DMAOR_AE                   (1 << 1)
 #define USB_DMAOR_DME                  (1 << 0)
 
 #define USB_DMASAR                     0x0000
index 44c01390d0353fd3170fc797eb4ce6393229bd14..dc269cb288c209d60e780eff287af2930fb4c477 100644 (file)
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name,               0444, DMI_PRODUCT_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(product_version,     0444, DMI_PRODUCT_VERSION);
 DEFINE_DMI_ATTR_WITH_SHOW(product_serial,      0400, DMI_PRODUCT_SERIAL);
 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,                0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_family,      0400, DMI_PRODUCT_FAMILY);
 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,                0444, DMI_BOARD_VENDOR);
 DEFINE_DMI_ATTR_WITH_SHOW(board_name,          0444, DMI_BOARD_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(board_version,       0444, DMI_BOARD_VERSION);
@@ -191,6 +192,7 @@ static void __init dmi_id_init_attr_table(void)
        ADD_DMI_ATTR(product_version,   DMI_PRODUCT_VERSION);
        ADD_DMI_ATTR(product_serial,    DMI_PRODUCT_SERIAL);
        ADD_DMI_ATTR(product_uuid,      DMI_PRODUCT_UUID);
+       ADD_DMI_ATTR(product_family,      DMI_PRODUCT_FAMILY);
        ADD_DMI_ATTR(board_vendor,      DMI_BOARD_VENDOR);
        ADD_DMI_ATTR(board_name,        DMI_BOARD_NAME);
        ADD_DMI_ATTR(board_version,     DMI_BOARD_VERSION);
index 54be60ead08f8068c18dc9bbfd5a40e3cb26685c..93f7acdaac7ac19c057fc6b98a76c270a4646f24 100644 (file)
@@ -430,6 +430,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
                dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
                dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
                dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+               dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
                break;
        case 2:         /* Base Board Information */
                dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
index 04ca8764f0c096f4e3f006ab74e4dc55996735a1..8bf27323f7a37c34591c45f8b39d2091ae096260 100644 (file)
@@ -36,6 +36,9 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
        if (acpi_disabled)
                return;
 
+       if (!efi_enabled(EFI_BOOT))
+               return;
+
        if (table->length < sizeof(bgrt_tab)) {
                pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
                       table->length, sizeof(bgrt_tab));
index 8c34d50a4d8032bbaba3322b3dee4ff22826a923..959777ec8a77bab62e49097cd93d711ccd311610 100644 (file)
 
 /* BIOS variables */
 static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
-static const efi_char16_t const efi_SecureBoot_name[] = {
+static const efi_char16_t efi_SecureBoot_name[] = {
        'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0
 };
-static const efi_char16_t const efi_SetupMode_name[] = {
+static const efi_char16_t efi_SetupMode_name[] = {
        'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0
 };
 
index a4831fe0223bffebdda4727589453aa6cb8fb7b1..a2c59a08b2bd69919b23989feddff71a88855617 100644 (file)
@@ -220,9 +220,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
 }
 
 const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
-       amdgpu_vram_mgr_init,
-       amdgpu_vram_mgr_fini,
-       amdgpu_vram_mgr_new,
-       amdgpu_vram_mgr_del,
-       amdgpu_vram_mgr_debug
+       .init           = amdgpu_vram_mgr_init,
+       .takedown       = amdgpu_vram_mgr_fini,
+       .get_node       = amdgpu_vram_mgr_new,
+       .put_node       = amdgpu_vram_mgr_del,
+       .debug          = amdgpu_vram_mgr_debug
 };
index fb08193599092d0d6562e0c5b4c019b6be45bed2..90332f55cfba91b7a543da4ec3820809bb876336 100644 (file)
@@ -77,13 +77,26 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
+       u32 v;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (adev->vce.harvest_config == 0 ||
+               adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+       else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
        if (ring == &adev->vce.ring[0])
-               return RREG32(mmVCE_RB_RPTR);
+               v = RREG32(mmVCE_RB_RPTR);
        else if (ring == &adev->vce.ring[1])
-               return RREG32(mmVCE_RB_RPTR2);
+               v = RREG32(mmVCE_RB_RPTR2);
        else
-               return RREG32(mmVCE_RB_RPTR3);
+               v = RREG32(mmVCE_RB_RPTR3);
+
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       return v;
 }
 
 /**
@@ -96,13 +109,26 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
+       u32 v;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (adev->vce.harvest_config == 0 ||
+               adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+       else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
        if (ring == &adev->vce.ring[0])
-               return RREG32(mmVCE_RB_WPTR);
+               v = RREG32(mmVCE_RB_WPTR);
        else if (ring == &adev->vce.ring[1])
-               return RREG32(mmVCE_RB_WPTR2);
+               v = RREG32(mmVCE_RB_WPTR2);
        else
-               return RREG32(mmVCE_RB_WPTR3);
+               v = RREG32(mmVCE_RB_WPTR3);
+
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       return v;
 }
 
 /**
@@ -116,12 +142,22 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (adev->vce.harvest_config == 0 ||
+               adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+       else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
        if (ring == &adev->vce.ring[0])
                WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
        else if (ring == &adev->vce.ring[1])
                WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
        else
                WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+       mutex_unlock(&adev->grbm_idx_mutex);
 }
 
 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
@@ -231,33 +267,38 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
        struct amdgpu_ring *ring;
        int idx, r;
 
-       ring = &adev->vce.ring[0];
-       WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
-
-       ring = &adev->vce.ring[1];
-       WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
-
-       ring = &adev->vce.ring[2];
-       WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
-
        mutex_lock(&adev->grbm_idx_mutex);
        for (idx = 0; idx < 2; ++idx) {
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
                WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
+
+               /* Program the instance 0 register space when both instances
+               (or only instance 0) are available; program the instance 1
+               register space when only instance 1 is available. */
+               if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
+                       ring = &adev->vce.ring[0];
+                       WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+                       WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+                       WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+                       ring = &adev->vce.ring[1];
+                       WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+                       WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+                       WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+                       ring = &adev->vce.ring[2];
+                       WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+                       WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+                       WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+               }
+
                vce_v3_0_mc_resume(adev, idx);
                WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
index d5f53d04fa08c30a4053aa9fd07b04c795177e4e..83e40fe51b6212f6bbcf44b119c47a86b3fb1638 100644 (file)
@@ -709,17 +709,17 @@ static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
 
 static struct phm_master_table_item
 vega10_thermal_start_thermal_controller_master_list[] = {
-       {NULL, tf_vega10_thermal_initialize},
-       {NULL, tf_vega10_thermal_set_temperature_range},
-       {NULL, tf_vega10_thermal_enable_alert},
+       { .tableFunction = tf_vega10_thermal_initialize },
+       { .tableFunction = tf_vega10_thermal_set_temperature_range },
+       { .tableFunction = tf_vega10_thermal_enable_alert },
 /* We should restrict performance levels to low before we halt the SMC.
  * On the other hand we are still in boot state when we do this
  * so it would be pointless.
  * If this assumption changes we have to revisit this table.
  */
-       {NULL, tf_vega10_thermal_setup_fan_table},
-       {NULL, tf_vega10_thermal_start_smc_fan_control},
-       {NULL, NULL}
+       { .tableFunction = tf_vega10_thermal_setup_fan_table },
+       { .tableFunction = tf_vega10_thermal_start_smc_fan_control },
+       { }
 };
 
 static struct phm_master_table_header
@@ -731,10 +731,10 @@ vega10_thermal_start_thermal_controller_master = {
 
 static struct phm_master_table_item
 vega10_thermal_set_temperature_range_master_list[] = {
-       {NULL, tf_vega10_thermal_disable_alert},
-       {NULL, tf_vega10_thermal_set_temperature_range},
-       {NULL, tf_vega10_thermal_enable_alert},
-       {NULL, NULL}
+       { .tableFunction = tf_vega10_thermal_disable_alert },
+       { .tableFunction = tf_vega10_thermal_set_temperature_range },
+       { .tableFunction = tf_vega10_thermal_enable_alert },
+       { }
 };
 
 struct phm_master_table_header
index 3e5f52110ea17384c84f568ba9b1a4955922523c..213fb837e1c40fe79bf536d54b083d99dee1c192 100644 (file)
@@ -1208,3 +1208,86 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
        return 0;
 }
 EXPORT_SYMBOL(drm_dp_stop_crc);
+
+struct dpcd_quirk {
+       u8 oui[3];
+       bool is_branch;
+       u32 quirks;
+};
+
+#define OUI(first, second, third) { (first), (second), (third) }
+
+static const struct dpcd_quirk dpcd_quirk_list[] = {
+       /* Analogix 7737 needs reduced M and N at HBR2 link rates */
+       { OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) },
+};
+
+#undef OUI
+
+/*
+ * Get a bit mask of DPCD quirks for the sink/branch device identified by
+ * ident. The quirk data is shared but it's up to the drivers to act on the
+ * data.
+ *
+ * For now, only the OUI (first three bytes) is used, but this may be extended
+ * to device identification string and hardware/firmware revisions later.
+ */
+static u32
+drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
+{
+       const struct dpcd_quirk *quirk;
+       u32 quirks = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) {
+               quirk = &dpcd_quirk_list[i];
+
+               if (quirk->is_branch != is_branch)
+                       continue;
+
+               if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0)
+                       continue;
+
+               quirks |= quirk->quirks;
+       }
+
+       return quirks;
+}
+
+/**
+ * drm_dp_read_desc - read sink/branch descriptor from DPCD
+ * @aux: DisplayPort AUX channel
+ * @desc: Device descriptor to fill from DPCD
+ * @is_branch: true for branch devices, false for sink devices
+ *
+ * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the
+ * identification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+                    bool is_branch)
+{
+       struct drm_dp_dpcd_ident *ident = &desc->ident;
+       unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
+       int ret, dev_id_len;
+
+       ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+       if (ret < 0)
+               return ret;
+
+       desc->quirks = drm_dp_get_quirks(ident, is_branch);
+
+       dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
+
+       DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
+                     is_branch ? "branch" : "sink",
+                     (int)sizeof(ident->oui), ident->oui,
+                     dev_id_len, ident->device_id,
+                     ident->hw_rev >> 4, ident->hw_rev & 0xf,
+                     ident->sw_major_rev, ident->sw_minor_rev,
+                     desc->quirks);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_read_desc);
index 09d3c4c3c858e8a05dcf4cbc9a14feb5910ff132..50294a7bd29da10f99e9dca59701b66c385bb66b 100644 (file)
@@ -82,14 +82,9 @@ err_file_priv_free:
        return ret;
 }
 
-static void exynos_drm_preclose(struct drm_device *dev,
-                                       struct drm_file *file)
-{
-       exynos_drm_subdrv_close(dev, file);
-}
-
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
+       exynos_drm_subdrv_close(dev, file);
        kfree(file->driver_priv);
        file->driver_priv = NULL;
 }
@@ -145,7 +140,6 @@ static struct drm_driver exynos_drm_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
                                  | DRIVER_ATOMIC | DRIVER_RENDER,
        .open                   = exynos_drm_open,
-       .preclose               = exynos_drm_preclose,
        .lastclose              = exynos_drm_lastclose,
        .postclose              = exynos_drm_postclose,
        .gem_free_object_unlocked = exynos_drm_gem_free_object,
index cb317693059696b3c86bda9c93c2e5ebcefad921..39c740572034a6d4f3d69f2ab861b1d0a8f80803 100644 (file)
@@ -160,12 +160,9 @@ struct exynos_drm_clk {
  *     drm framework doesn't support multiple irq yet.
  *     we can refer to the crtc to current hardware interrupt occurred through
  *     this pipe value.
- * @enabled: if the crtc is enabled or not
- * @event: vblank event that is currently queued for flip
- * @wait_update: wait all pending planes updates to finish
- * @pending_update: number of pending plane updates in this crtc
  * @ops: pointer to callbacks for exynos drm specific functionality
  * @ctx: A pointer to the crtc's implementation specific context
+ * @pipe_clk: A pointer to the crtc's pipeline clock.
  */
 struct exynos_drm_crtc {
        struct drm_crtc                 base;
index fc4fda738906251de7a6047c3fc8f3699c469b79..d404de86d5f9de1d5fe07f856fe4a10fafdefd91 100644 (file)
@@ -1633,7 +1633,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
 {
        struct device *dev = dsi->dev;
        struct device_node *node = dev->of_node;
-       struct device_node *ep;
        int ret;
 
        ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
@@ -1641,32 +1640,21 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
        if (ret < 0)
                return ret;
 
-       ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0);
-       if (!ep) {
-               dev_err(dev, "no output port with endpoint specified\n");
-               return -EINVAL;
-       }
-
-       ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
+       ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
                                     &dsi->burst_clk_rate);
        if (ret < 0)
-               goto end;
+               return ret;
 
-       ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
+       ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
                                     &dsi->esc_clk_rate);
        if (ret < 0)
-               goto end;
-
-       of_node_put(ep);
+               return ret;
 
        dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0);
        if (!dsi->bridge_node)
                return -EINVAL;
 
-end:
-       of_node_put(ep);
-
-       return ret;
+       return 0;
 }
 
 static int exynos_dsi_bind(struct device *dev, struct device *master,
@@ -1817,6 +1805,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
 static int exynos_dsi_remove(struct platform_device *pdev)
 {
+       struct exynos_dsi *dsi = platform_get_drvdata(pdev);
+
+       of_node_put(dsi->bridge_node);
+
        pm_runtime_disable(&pdev->dev);
 
        component_del(&pdev->dev, &exynos_dsi_component_ops);
index dca989eb2d42ed48f6c13c15fe9d3f8a9cbfaab2..24fe04d6307b0383da918308827a12027ded9fbe 100644 (file)
@@ -779,8 +779,26 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
        vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct intel_engine_cs *engine;
+       struct intel_vgpu_workload *pos, *n;
+       unsigned int tmp;
+
+       /* free the unsubmitted workloads in the queues. */
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+               list_for_each_entry_safe(pos, n,
+                       &vgpu->workload_q_head[engine->id], list) {
+                       list_del_init(&pos->list);
+                       free_workload(pos);
+               }
+       }
+}
+
 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 {
+       clean_workloads(vgpu, ALL_ENGINES);
        kmem_cache_destroy(vgpu->workloads);
 }
 
@@ -811,17 +829,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine;
-       struct intel_vgpu_workload *pos, *n;
        unsigned int tmp;
 
-       for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-               /* free the unsubmited workload in the queue */
-               list_for_each_entry_safe(pos, n,
-                       &vgpu->workload_q_head[engine->id], list) {
-                       list_del_init(&pos->list);
-                       free_workload(pos);
-               }
-
+       clean_workloads(vgpu, engine_mask);
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                init_vgpu_execlist(vgpu, engine->id);
-       }
 }
index c995e540ff96e1f8a18a9232de2b26794fa03aa2..0ffd696545927277200d8b2332a024168672c5c5 100644 (file)
@@ -1366,18 +1366,28 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       i915_reg_t reg = {.reg = offset};
+       u32 v = *(u32 *)p_data;
+
+       if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
+               return intel_vgpu_default_mmio_write(vgpu,
+                               offset, p_data, bytes);
 
        switch (offset) {
        case 0x4ddc:
-               vgpu_vreg(vgpu, offset) = 0x8000003c;
-               /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
-               I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
                break;
        case 0x42080:
-               vgpu_vreg(vgpu, offset) = 0x8000;
-               /* WaCompressedResourceDisplayNewHashMode:skl */
-               I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+               /* bypass WaCompressedResourceDisplayNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
+               break;
+       case 0xe194:
+               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
+               break;
+       case 0x7014:
+               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
                break;
        default:
                return -EINVAL;
@@ -1634,7 +1644,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
                NULL, NULL);
-       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+                skl_misc_ctl_write);
        MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2568,7 +2579,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_D(0x6e570, D_BDW_PLUS);
        MMIO_D(0x65f10, D_BDW_PLUS);
 
-       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+                skl_misc_ctl_write);
        MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
index 3036d4835b0fa7a3b366a31d0b6ed18fc7889ae1..c994fe6e65b2eafe6a133fccb70f7c5db5019b00 100644 (file)
@@ -1272,10 +1272,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        dev_priv->ipc_enabled = false;
 
-       /* Everything is in place, we can now relax! */
-       DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
-                driver.name, driver.major, driver.minor, driver.patchlevel,
-                driver.date, pci_name(pdev), dev_priv->drm.primary->index);
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
                DRM_INFO("DRM_I915_DEBUG enabled\n");
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
index c9b0949f6c1a2aba281c9a4bbf8d8b2c9ede3785..963f6d4481f76ec54b5aeab138b0cca3f4ff90e5 100644 (file)
@@ -562,7 +562,8 @@ struct intel_link_m_n {
 
 void intel_link_compute_m_n(int bpp, int nlanes,
                            int pixel_clock, int link_clock,
-                           struct intel_link_m_n *m_n);
+                           struct intel_link_m_n *m_n,
+                           bool reduce_m_n);
 
 /* Interface history:
  *
index a0563e18d753fd84731f8372efc7a938d2898a6b..50b8f1139ff99d6dc8d3ec225abf251d6af4465d 100644 (file)
@@ -2313,7 +2313,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
                    appgtt->base.allocate_va_range) {
                        ret = appgtt->base.allocate_va_range(&appgtt->base,
                                                             vma->node.start,
-                                                            vma->node.size);
+                                                            vma->size);
                        if (ret)
                                goto err_pages;
                }
index 129ed303a6c46e2f856eb1abc84990079abefb65..57d9f7f4ef159cd6eb30f9bc0bd10683eec5123f 100644 (file)
@@ -59,9 +59,6 @@ static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
                return;
 
        mutex_unlock(&dev->struct_mutex);
-
-       /* expedite the RCU grace period to free some request slabs */
-       synchronize_rcu_expedited();
 }
 
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
@@ -274,8 +271,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
                                I915_SHRINK_ACTIVE);
        intel_runtime_pm_put(dev_priv);
 
-       synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
-
        return freed;
 }
 
index fd97fe00cd0d2ad00e1c7258eeb51ecf0f60d4c1..190f6aa5d15eb82bf51cbaed00b16ea8c5d4f5bc 100644 (file)
@@ -2953,7 +2953,6 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
        u32 pipestat_mask;
        u32 enable_mask;
        enum pipe pipe;
-       u32 val;
 
        pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
                        PIPE_CRC_DONE_INTERRUPT_STATUS;
@@ -2964,18 +2963,16 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 
        enable_mask = I915_DISPLAY_PORT_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+               I915_LPE_PIPE_A_INTERRUPT |
+               I915_LPE_PIPE_B_INTERRUPT;
+
        if (IS_CHERRYVIEW(dev_priv))
-               enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+               enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
+                       I915_LPE_PIPE_C_INTERRUPT;
 
        WARN_ON(dev_priv->irq_mask != ~0);
 
-       val = (I915_LPE_PIPE_A_INTERRUPT |
-               I915_LPE_PIPE_B_INTERRUPT |
-               I915_LPE_PIPE_C_INTERRUPT);
-
-       enable_mask |= val;
-
        dev_priv->irq_mask = ~enable_mask;
 
        GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
index 5a7c63e64381e48a193610305973c468502565d2..65b837e96fe629d58f539b253dc1ab14595a459b 100644 (file)
@@ -8280,7 +8280,7 @@ enum {
 
 /* MIPI DSI registers */
 
-#define _MIPI_PORT(port, a, c) ((port) ? c : a)        /* ports A and C only */
+#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c)    /* ports A and C only */
 #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
 
 #define MIPIO_TXESC_CLK_DIV1                   _MMIO(0x160004)
index 3617927af269afb9872b0d5d419873f0945f880c..3cabe52a4e3b168e176d1f55abdae65f67219ef7 100644 (file)
@@ -6101,7 +6101,7 @@ retry:
        pipe_config->fdi_lanes = lane;
 
        intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
-                              link_bw, &pipe_config->fdi_m_n);
+                              link_bw, &pipe_config->fdi_m_n, false);
 
        ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
        if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
@@ -6277,7 +6277,8 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
 }
 
 static void compute_m_n(unsigned int m, unsigned int n,
-                       uint32_t *ret_m, uint32_t *ret_n)
+                       uint32_t *ret_m, uint32_t *ret_n,
+                       bool reduce_m_n)
 {
        /*
         * Reduce M/N as much as possible without loss in precision. Several DP
@@ -6285,9 +6286,11 @@ static void compute_m_n(unsigned int m, unsigned int n,
         * values. The passed in values are more likely to have the least
         * significant bits zero than M after rounding below, so do this first.
         */
-       while ((m & 1) == 0 && (n & 1) == 0) {
-               m >>= 1;
-               n >>= 1;
+       if (reduce_m_n) {
+               while ((m & 1) == 0 && (n & 1) == 0) {
+                       m >>= 1;
+                       n >>= 1;
+               }
        }
 
        *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
@@ -6298,16 +6301,19 @@ static void compute_m_n(unsigned int m, unsigned int n,
 void
 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
                       int pixel_clock, int link_clock,
-                      struct intel_link_m_n *m_n)
+                      struct intel_link_m_n *m_n,
+                      bool reduce_m_n)
 {
        m_n->tu = 64;
 
        compute_m_n(bits_per_pixel * pixel_clock,
                    link_clock * nlanes * 8,
-                   &m_n->gmch_m, &m_n->gmch_n);
+                   &m_n->gmch_m, &m_n->gmch_n,
+                   reduce_m_n);
 
        compute_m_n(pixel_clock, link_clock,
-                   &m_n->link_m, &m_n->link_n);
+                   &m_n->link_m, &m_n->link_n,
+                   reduce_m_n);
 }
 
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
index ee77b519835c5fd9d8c582a9c3169b43d06ebab6..fc691b8b317cf3924a98adfb51ca1183a6f2a6b3 100644 (file)
@@ -1507,37 +1507,6 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
        DRM_DEBUG_KMS("common rates: %s\n", str);
 }
 
-bool
-__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
-{
-       u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
-                                                     DP_SINK_OUI;
-
-       return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
-              sizeof(*desc);
-}
-
-bool intel_dp_read_desc(struct intel_dp *intel_dp)
-{
-       struct intel_dp_desc *desc = &intel_dp->desc;
-       bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
-                      DP_OUI_SUPPORT;
-       int dev_id_len;
-
-       if (!__intel_dp_read_desc(intel_dp, desc))
-               return false;
-
-       dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
-       DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
-                     drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
-                     (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
-                     dev_id_len, desc->device_id,
-                     desc->hw_rev >> 4, desc->hw_rev & 0xf,
-                     desc->sw_major_rev, desc->sw_minor_rev);
-
-       return true;
-}
-
 static int rate_to_index(int find, const int *rates)
 {
        int i = 0;
@@ -1624,6 +1593,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        int common_rates[DP_MAX_SUPPORTED_RATES] = {};
        int common_len;
        uint8_t link_bw, rate_select;
+       bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+                                          DP_DPCD_QUIRK_LIMITED_M_N);
 
        common_len = intel_dp_common_rates(intel_dp, common_rates);
 
@@ -1753,7 +1724,8 @@ found:
        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
-                              &pipe_config->dp_m_n);
+                              &pipe_config->dp_m_n,
+                              reduce_m_n);
 
        if (intel_connector->panel.downclock_mode != NULL &&
                dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -1761,7 +1733,8 @@ found:
                        intel_link_compute_m_n(bpp, lane_count,
                                intel_connector->panel.downclock_mode->clock,
                                pipe_config->port_clock,
-                               &pipe_config->dp_m2_n2);
+                               &pipe_config->dp_m2_n2,
+                               reduce_m_n);
        }
 
        /*
@@ -3622,7 +3595,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
        if (!intel_dp_read_dpcd(intel_dp))
                return false;
 
-       intel_dp_read_desc(intel_dp);
+       drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+                        drm_dp_is_branch(intel_dp->dpcd));
 
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
                dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
@@ -4624,7 +4598,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
 
        intel_dp_print_rates(intel_dp);
 
-       intel_dp_read_desc(intel_dp);
+       drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+                        drm_dp_is_branch(intel_dp->dpcd));
 
        intel_dp_configure_mst(intel_dp);
 
index c1f62eb07c07a7ce49b3ed39e1e6ee2b23eb65e8..989e25577ac0445f9e7632a575a6de38d7f9ec49 100644 (file)
@@ -44,6 +44,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        int lane_count, slots;
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        int mst_pbn;
+       bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+                                          DP_DPCD_QUIRK_LIMITED_M_N);
 
        pipe_config->has_pch_encoder = false;
        bpp = 24;
@@ -75,7 +77,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
-                              &pipe_config->dp_m_n);
+                              &pipe_config->dp_m_n,
+                              reduce_m_n);
 
        pipe_config->dp_m_n.tu = slots;
 
index aaee3949a42267603a5dfa9deb9be87dd0f7b2b4..f630c7af50205540b64481d9c3ee559fbdfc8f7f 100644 (file)
@@ -906,14 +906,6 @@ enum link_m_n_set {
        M2_N2
 };
 
-struct intel_dp_desc {
-       u8 oui[3];
-       u8 device_id[6];
-       u8 hw_rev;
-       u8 sw_major_rev;
-       u8 sw_minor_rev;
-} __packed;
-
 struct intel_dp_compliance_data {
        unsigned long edid;
        uint8_t video_pattern;
@@ -957,7 +949,7 @@ struct intel_dp {
        /* Max link BW for the sink as per DPCD registers */
        int max_sink_link_bw;
        /* sink or branch descriptor */
-       struct intel_dp_desc desc;
+       struct drm_dp_desc desc;
        struct drm_dp_aux aux;
        enum intel_display_power_domain aux_power_domain;
        uint8_t train_set[4];
@@ -1532,9 +1524,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 }
 
 bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
-bool __intel_dp_read_desc(struct intel_dp *intel_dp,
-                         struct intel_dp_desc *desc);
-bool intel_dp_read_desc(struct intel_dp *intel_dp);
 int intel_dp_link_required(int pixel_clock, int bpp);
 int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
index 668f00480d97c0ff0418c19dfaaffec31fc65341..292fedf30b0010c33e1eefd8f643b1b87bd38edd 100644 (file)
@@ -149,44 +149,10 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
 
 static void lpe_audio_irq_unmask(struct irq_data *d)
 {
-       struct drm_i915_private *dev_priv = d->chip_data;
-       unsigned long irqflags;
-       u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-               I915_LPE_PIPE_B_INTERRUPT);
-
-       if (IS_CHERRYVIEW(dev_priv))
-               val |= I915_LPE_PIPE_C_INTERRUPT;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-       dev_priv->irq_mask &= ~val;
-       I915_WRITE(VLV_IIR, val);
-       I915_WRITE(VLV_IIR, val);
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       POSTING_READ(VLV_IMR);
-
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static void lpe_audio_irq_mask(struct irq_data *d)
 {
-       struct drm_i915_private *dev_priv = d->chip_data;
-       unsigned long irqflags;
-       u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-               I915_LPE_PIPE_B_INTERRUPT);
-
-       if (IS_CHERRYVIEW(dev_priv))
-               val |= I915_LPE_PIPE_C_INTERRUPT;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-       dev_priv->irq_mask |= val;
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       I915_WRITE(VLV_IIR, val);
-       I915_WRITE(VLV_IIR, val);
-       POSTING_READ(VLV_IIR);
-
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static struct irq_chip lpe_audio_irqchip = {
@@ -330,8 +296,6 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
 
        desc = irq_to_desc(dev_priv->lpe_audio.irq);
 
-       lpe_audio_irq_mask(&desc->irq_data);
-
        lpe_audio_platdev_destroy(dev_priv);
 
        irq_free_desc(dev_priv->lpe_audio.irq);
index c8f7c631fc1f8e354cac0038c80aa35d0a1dd0d2..dac4e003c1f317ec402110132bad0c3a734bf52a 100644 (file)
@@ -1989,7 +1989,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
        ce->ring = ring;
        ce->state = vma;
-       ce->initialised = engine->init_context == NULL;
+       ce->initialised |= engine->init_context == NULL;
 
        return 0;
 
index 71cbe9c089320cbc305c827bacd41fcbf1e542ce..5abef482eacf1b24780edea4c40ab7e593a42dc6 100644 (file)
@@ -240,7 +240,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
                return false;
        }
 
-       intel_dp_read_desc(dp);
+       drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
 
        DRM_DEBUG_KMS("Success: LSPCON init\n");
        return true;
index 1afb8b06e3e19bf23ed287277415afb364504b23..12b85b3278cd1cfc53b159253e9152e3d8f1784b 100644 (file)
@@ -320,7 +320,7 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
 static int igt_ctx_exec(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct drm_i915_gem_object *obj;
+       struct drm_i915_gem_object *obj = NULL;
        struct drm_file *file;
        IGT_TIMEOUT(end_time);
        LIST_HEAD(objects);
@@ -359,7 +359,7 @@ static int igt_ctx_exec(void *arg)
                }
 
                for_each_engine(engine, i915, id) {
-                       if (dw == 0) {
+                       if (!obj) {
                                obj = create_test_object(ctx, file, &objects);
                                if (IS_ERR(obj)) {
                                        err = PTR_ERR(obj);
@@ -376,8 +376,10 @@ static int igt_ctx_exec(void *arg)
                                goto out_unlock;
                        }
 
-                       if (++dw == max_dwords(obj))
+                       if (++dw == max_dwords(obj)) {
+                               obj = NULL;
                                dw = 0;
+                       }
                        ndwords++;
                }
                ncontexts++;
index 5b8e23d051f2f3752a180df4abedccefcfebc3ed..0a31cd6d01ce145f3f112b8c19d17dcdd46ea524 100644 (file)
@@ -13,6 +13,7 @@ config DRM_MSM
        select QCOM_SCM
        select SND_SOC_HDMI_CODEC if SND_SOC
        select SYNC_FILE
+       select PM_OPP
        default y
        help
          DRM/KMS driver for MSM/snapdragon.
index f8f48d014978c0ccd5cc9ffcc3d699b86f779399..9c34d7824988654ab2f8366741724da8ac18b82a 100644 (file)
@@ -116,7 +116,7 @@ static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
        return 0;
 }
 
-static struct irq_domain_ops mdss_hw_irqdomain_ops = {
+static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
        .map = mdss_hw_irqdomain_map,
        .xlate = irq_domain_xlate_onecell,
 };
index a38c5fe6cc19752a9832618af1a4f146bab4a8df..7d3741215387110bb7f7ad622cb54d0d411fb9f1 100644 (file)
@@ -225,9 +225,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
 
        mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
                        sizeof(*mdp5_state), GFP_KERNEL);
+       if (!mdp5_state)
+               return NULL;
 
-       if (mdp5_state && mdp5_state->base.fb)
-               drm_framebuffer_reference(mdp5_state->base.fb);
+       __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
 
        return &mdp5_state->base;
 }
@@ -444,6 +445,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
                        mdp5_pipe_release(state->state, old_hwpipe);
                        mdp5_pipe_release(state->state, old_right_hwpipe);
                }
+       } else {
+               mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+               mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+               mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
        }
 
        return 0;
index 87b5695d4034df0e118475617167ee990ccfc490..9d498eb81906220705d85c57260cd4b1f82fa1fc 100644 (file)
@@ -830,6 +830,7 @@ static struct drm_driver msm_driver = {
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export   = drm_gem_prime_export,
        .gem_prime_import   = drm_gem_prime_import,
+       .gem_prime_res_obj  = msm_gem_prime_res_obj,
        .gem_prime_pin      = msm_gem_prime_pin,
        .gem_prime_unpin    = msm_gem_prime_unpin,
        .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
index 28b6f9ba50664509bb44fa2b5704d3bcc86a67af..1b26ca626528ab5f4435f689d0a213f0e672aaa5 100644 (file)
@@ -224,6 +224,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
                struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
index 3f299c537b77ae347bce4c92368ed774c7695459..a2f89bac9c160674f5f103f75a04a7a92e7c5b99 100644 (file)
@@ -99,8 +99,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 }
 
 struct msm_fence {
-       struct msm_fence_context *fctx;
        struct dma_fence base;
+       struct msm_fence_context *fctx;
 };
 
 static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
@@ -130,19 +130,13 @@ static bool msm_fence_signaled(struct dma_fence *fence)
        return fence_completed(f->fctx, f->base.seqno);
 }
 
-static void msm_fence_release(struct dma_fence *fence)
-{
-       struct msm_fence *f = to_msm_fence(fence);
-       kfree_rcu(f, base.rcu);
-}
-
 static const struct dma_fence_ops msm_fence_ops = {
        .get_driver_name = msm_fence_get_driver_name,
        .get_timeline_name = msm_fence_get_timeline_name,
        .enable_signaling = msm_fence_enable_signaling,
        .signaled = msm_fence_signaled,
        .wait = dma_fence_default_wait,
-       .release = msm_fence_release,
+       .release = dma_fence_free,
 };
 
 struct dma_fence *
index 68e509b3b9e4d08730e3901f46a397519c33e77c..50289a23baf8df27c4bc1aebf067da2b011b8f28 100644 (file)
@@ -758,6 +758,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
        struct msm_gem_object *msm_obj;
        bool use_vram = false;
 
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
        case MSM_BO_CACHED:
@@ -853,7 +855,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
        size = PAGE_ALIGN(dmabuf->size);
 
+       /* Take mutex so we can modify the inactive list in msm_gem_new_impl */
+       mutex_lock(&dev->struct_mutex);
        ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
+       mutex_unlock(&dev->struct_mutex);
+
        if (ret)
                goto fail;
 
index 60bb290700cef9c32fc2ca0dd2db229a6a7ffedf..13403c6da6c75012fa5f17f4b0b63075ddf20874 100644 (file)
@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
        if (!obj->import_attach)
                msm_gem_put_pages(obj);
 }
+
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+       return msm_obj->resv;
+}
index 1c545ebe6a5a0f875a995b1db3a1204b0b21d57e..7832e6421d250d0bd78400057e46dce07dc2d18c 100644 (file)
@@ -410,12 +410,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                if (!in_fence)
                        return -EINVAL;
 
-               /* TODO if we get an array-fence due to userspace merging multiple
-                * fences, we need a way to determine if all the backing fences
-                * are from our own context..
+               /*
+                * Wait if the fence is from a foreign context, or if the fence
+                * array contains any fence from a foreign context.
                 */
-
-               if (in_fence->context != gpu->fctx->context) {
+               if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
                        ret = dma_fence_wait(in_fence, true);
                        if (ret)
                                return ret;
@@ -496,8 +495,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                        goto out;
                }
 
-               if ((submit_cmd.size + submit_cmd.submit_offset) >=
-                               msm_obj->base.size) {
+               if (!submit_cmd.size ||
+                       ((submit_cmd.size + submit_cmd.submit_offset) >
+                               msm_obj->base.size)) {
                        DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
                        ret = -EINVAL;
                        goto out;
index 97b9c38c6b3ff7e05adf9ea84e8328d19e32b90f..0fdc88d79ca87b3a54709aa4d527db80ca0997dc 100644 (file)
@@ -549,9 +549,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
                gpu->grp_clks[i] = get_clock(dev, name);
 
                /* Remember the key clocks that we need to control later */
-               if (!strcmp(name, "core"))
+               if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
                        gpu->core_clk = gpu->grp_clks[i];
-               else if (!strcmp(name, "rbbmtimer"))
+               else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
                        gpu->rbbmtimer_clk = gpu->grp_clks[i];
 
                ++i;
index fe40e5e499dd4122ce0f623d9df776c0a0cf2bdf..687705c5079422a82b9977e5694d0cd45b2a47bc 100644 (file)
@@ -275,10 +275,12 @@ config HID_EMS_FF
         - Trio Linker Plus II
 
 config HID_ELECOM
-       tristate "ELECOM BM084 bluetooth mouse"
+       tristate "ELECOM HID devices"
        depends on HID
        ---help---
-       Support for the ELECOM BM084 (bluetooth mouse).
+       Support for ELECOM devices:
+         - BM084 Bluetooth Mouse
+         - DEFT Trackball (Wired and wireless)
 
 config HID_ELO
        tristate "ELO USB 4000/4500 touchscreen"
index 16df6cc902359ea620de3f079be796d32be4783a..a6268f2f7408a520660c6add3c734a8006474393 100644 (file)
@@ -69,6 +69,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_IS_MULTITOUCH            BIT(3)
 #define QUIRK_NO_CONSUMER_USAGES       BIT(4)
 #define QUIRK_USE_KBD_BACKLIGHT                BIT(5)
+#define QUIRK_T100_KEYBOARD            BIT(6)
 
 #define I2C_KEYBOARD_QUIRKS                    (QUIRK_FIX_NOTEBOOK_REPORT | \
                                                 QUIRK_NO_INIT_REPORTS | \
@@ -536,6 +537,8 @@ static void asus_remove(struct hid_device *hdev)
                drvdata->kbd_backlight->removed = true;
                cancel_work_sync(&drvdata->kbd_backlight->work);
        }
+
+       hid_hw_stop(hdev);
 }
 
 static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -548,6 +551,12 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
                rdesc[55] = 0xdd;
        }
+       if (drvdata->quirks & QUIRK_T100_KEYBOARD &&
+                *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) {
+               hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n");
+               rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
+       }
+
        return rdesc;
 }
 
@@ -560,6 +569,9 @@ static const struct hid_device_id asus_devices[] = {
                USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+               USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
+         QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
        { }
 };
 MODULE_DEVICE_TABLE(hid, asus_devices);
index 37084b6457851ebe0d52361e327db9df07b86b2c..04cee65531d761c18e53775ffc784c3c3d993daa 100644 (file)
@@ -1855,6 +1855,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
@@ -1891,6 +1892,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
index 6e3848a8d8dd1416a0091ce0e7d263325dd8910b..e2c7465df69f3ae74c2cb1979c531b02e2934089 100644 (file)
@@ -1,10 +1,8 @@
 /*
- *  HID driver for Elecom BM084 (bluetooth mouse).
- *  Removes a non-existing horizontal wheel from
- *  the HID descriptor.
- *  (This module is based on "hid-ortek".)
- *
+ *  HID driver for ELECOM devices.
  *  Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
+ *  Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
+ *  Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
  */
 
 /*
 static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
-               hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
-               rdesc[47] = 0x00;
+       switch (hdev->product) {
+       case USB_DEVICE_ID_ELECOM_BM084:
+               /* The BM084 Bluetooth mouse includes a non-existing horizontal
+                * wheel in the HID descriptor. */
+               if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
+                       hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
+                       rdesc[47] = 0x00;
+               }
+               break;
+       case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
+       case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
+               /* The DEFT trackball has eight buttons, but its descriptor only
+                * reports five, disabling the three Fn buttons on the top of
+                * the mouse.
+                *
+                * Apply the following diff to the descriptor:
+                *
+                * Collection (Physical),              Collection (Physical),
+                *     Report ID (1),                      Report ID (1),
+                *     Report Count (5),           ->      Report Count (8),
+                *     Report Size (1),                    Report Size (1),
+                *     Usage Page (Button),                Usage Page (Button),
+                *     Usage Minimum (01h),                Usage Minimum (01h),
+                *     Usage Maximum (05h),        ->      Usage Maximum (08h),
+                *     Logical Minimum (0),                Logical Minimum (0),
+                *     Logical Maximum (1),                Logical Maximum (1),
+                *     Input (Variable),                   Input (Variable),
+                *     Report Count (1),           ->      Report Count (0),
+                *     Report Size (3),                    Report Size (3),
+                *     Input (Constant),                   Input (Constant),
+                *     Report Size (16),                   Report Size (16),
+                *     Report Count (2),                   Report Count (2),
+                *     Usage Page (Desktop),               Usage Page (Desktop),
+                *     Usage (X),                          Usage (X),
+                *     Usage (Y),                          Usage (Y),
+                *     Logical Minimum (-32768),           Logical Minimum (-32768),
+                *     Logical Maximum (32767),            Logical Maximum (32767),
+                *     Input (Variable, Relative),         Input (Variable, Relative),
+                * End Collection,                     End Collection,
+                */
+               if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
+                       hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
+                       rdesc[13] = 8; /* Button/Variable Report Count */
+                       rdesc[21] = 8; /* Button/Variable Usage Maximum */
+                       rdesc[29] = 0; /* Button/Constant Report Count */
+               }
+               break;
        }
        return rdesc;
 }
 
 static const struct hid_device_id elecom_devices[] = {
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084)},
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, elecom_devices);
index 643390ba749d96c5ddb5fbde68fda45c38569c3d..8ca1e8ce0af24e325957526c125ccf55d9081eb8 100644 (file)
 #define USB_VENDOR_ID_ASUSTEK          0x0b05
 #define USB_DEVICE_ID_ASUSTEK_LCM      0x1726
 #define USB_DEVICE_ID_ASUSTEK_LCM2     0x175b
+#define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD    0x17e0
 #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD     0x8585
 #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD     0x0101
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854
 
 #define USB_VENDOR_ID_ELECOM           0x056e
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
+#define USB_DEVICE_ID_ELECOM_DEFT_WIRED        0x00fe
+#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS     0x00ff
 
 #define USB_VENDOR_ID_DREAM_CHEEKY     0x1d34
 #define USB_DEVICE_ID_DREAM_CHEEKY_WN  0x0004
index 20b40ad2632503754685b84cc07d8787a4a44515..1d6c997b300149269367d00fb5db66b7c2ea25b7 100644 (file)
@@ -349,6 +349,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
 
        if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
                magicmouse_emit_buttons(msc, clicks & 3);
+               input_mt_report_pointer_emulation(input, true);
                input_report_rel(input, REL_X, x);
                input_report_rel(input, REL_Y, y);
        } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@@ -388,16 +389,16 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
                __clear_bit(BTN_RIGHT, input->keybit);
                __clear_bit(BTN_MIDDLE, input->keybit);
                __set_bit(BTN_MOUSE, input->keybit);
-               __set_bit(BTN_TOOL_FINGER, input->keybit);
-               __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
-               __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
-               __set_bit(BTN_TOOL_QUADTAP, input->keybit);
-               __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
-               __set_bit(BTN_TOUCH, input->keybit);
-               __set_bit(INPUT_PROP_POINTER, input->propbit);
                __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
        }
 
+       __set_bit(BTN_TOOL_FINGER, input->keybit);
+       __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+       __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+       __set_bit(BTN_TOOL_QUADTAP, input->keybit);
+       __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
+       __set_bit(BTN_TOUCH, input->keybit);
+       __set_bit(INPUT_PROP_POINTER, input->propbit);
 
        __set_bit(EV_ABS, input->evbit);
 
index 8daa8ce64ebba51e91e57ec801fa7e702fb2a072..fb55fb4c39fcfecaca55c0b8720d28d2f9717678 100644 (file)
@@ -897,6 +897,15 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
        return 0;
 }
 
+static void i2c_hid_acpi_fix_up_power(struct device *dev)
+{
+       acpi_handle handle = ACPI_HANDLE(dev);
+       struct acpi_device *adev;
+
+       if (handle && acpi_bus_get_device(handle, &adev) == 0)
+               acpi_device_fix_up_power(adev);
+}
+
 static const struct acpi_device_id i2c_hid_acpi_match[] = {
        {"ACPI0C50", 0 },
        {"PNP0C50", 0 },
@@ -909,6 +918,8 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
 {
        return -ENODEV;
 }
+
+static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
 #endif
 
 #ifdef CONFIG_OF
@@ -1030,6 +1041,8 @@ static int i2c_hid_probe(struct i2c_client *client,
        if (ret < 0)
                goto err_regulator;
 
+       i2c_hid_acpi_fix_up_power(&client->dev);
+
        pm_runtime_get_noresume(&client->dev);
        pm_runtime_set_active(&client->dev);
        pm_runtime_enable(&client->dev);
index 4b225fb19a16842f635026d1b1023d5d1cf5068e..e274c9dc32f3a211d97d02f2b6477f20c9121fac 100644 (file)
@@ -1571,37 +1571,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
 {
        unsigned char *data = wacom->data;
 
-       if (wacom->pen_input)
+       if (wacom->pen_input) {
                dev_dbg(wacom->pen_input->dev.parent,
                        "%s: received report #%d\n", __func__, data[0]);
-       else if (wacom->touch_input)
+
+               if (len == WACOM_PKGLEN_PENABLED ||
+                   data[0] == WACOM_REPORT_PENABLED)
+                       return wacom_tpc_pen(wacom);
+       }
+       else if (wacom->touch_input) {
                dev_dbg(wacom->touch_input->dev.parent,
                        "%s: received report #%d\n", __func__, data[0]);
 
-       switch (len) {
-       case WACOM_PKGLEN_TPC1FG:
-               return wacom_tpc_single_touch(wacom, len);
+               switch (len) {
+               case WACOM_PKGLEN_TPC1FG:
+                       return wacom_tpc_single_touch(wacom, len);
 
-       case WACOM_PKGLEN_TPC2FG:
-               return wacom_tpc_mt_touch(wacom);
+               case WACOM_PKGLEN_TPC2FG:
+                       return wacom_tpc_mt_touch(wacom);
 
-       case WACOM_PKGLEN_PENABLED:
-               return wacom_tpc_pen(wacom);
+               default:
+                       switch (data[0]) {
+                       case WACOM_REPORT_TPC1FG:
+                       case WACOM_REPORT_TPCHID:
+                       case WACOM_REPORT_TPCST:
+                       case WACOM_REPORT_TPC1FGE:
+                               return wacom_tpc_single_touch(wacom, len);
 
-       default:
-               switch (data[0]) {
-               case WACOM_REPORT_TPC1FG:
-               case WACOM_REPORT_TPCHID:
-               case WACOM_REPORT_TPCST:
-               case WACOM_REPORT_TPC1FGE:
-                       return wacom_tpc_single_touch(wacom, len);
-
-               case WACOM_REPORT_TPCMT:
-               case WACOM_REPORT_TPCMT2:
-                       return wacom_mt_touch(wacom);
+                       case WACOM_REPORT_TPCMT:
+                       case WACOM_REPORT_TPCMT2:
+                               return wacom_mt_touch(wacom);
 
-               case WACOM_REPORT_PENABLED:
-                       return wacom_tpc_pen(wacom);
+                       }
                }
        }
 
index bf7419a56454e3834ea2c2034d3170591f1ad97f..f4eace5ea184095eb0c170c4f3f1647f72b8c537 100644 (file)
@@ -485,10 +485,10 @@ void bitmap_print_sb(struct bitmap *bitmap)
        pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
        pr_debug("       version: %d\n", le32_to_cpu(sb->version));
        pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
-                *(__u32 *)(sb->uuid+0),
-                *(__u32 *)(sb->uuid+4),
-                *(__u32 *)(sb->uuid+8),
-                *(__u32 *)(sb->uuid+12));
+                le32_to_cpu(*(__u32 *)(sb->uuid+0)),
+                le32_to_cpu(*(__u32 *)(sb->uuid+4)),
+                le32_to_cpu(*(__u32 *)(sb->uuid+8)),
+                le32_to_cpu(*(__u32 *)(sb->uuid+12)));
        pr_debug("        events: %llu\n",
                 (unsigned long long) le64_to_cpu(sb->events));
        pr_debug("events cleared: %llu\n",
index cd8139593ccd50655a2329460cc8de9d175eac86..840c1496b2b138ef504bde4c441b1082df183473 100644 (file)
@@ -1334,7 +1334,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
-               .bi_op_flags = REQ_PREFLUSH,
+               .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
index c7f7c8d7657670850adedceb505538e1b9cdb2ce..7910bfe50da4469c44b571363cc6696f74f5fa42 100644 (file)
@@ -783,7 +783,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
                        for (i = 0; i < commit_sections; i++)
                                rw_section_mac(ic, commit_start + i, true);
                }
-               rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp);
+               rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
+                          commit_sections, &io_comp);
        } else {
                unsigned to_end;
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
@@ -2374,21 +2375,6 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
        blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
 }
 
-/* FIXME: use new kvmalloc */
-static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp)
-{
-       void *ptr = NULL;
-
-       if (size <= PAGE_SIZE)
-               ptr = kmalloc(size, GFP_KERNEL | gfp);
-       if (!ptr && size <= KMALLOC_MAX_SIZE)
-               ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp);
-       if (!ptr)
-               ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL);
-
-       return ptr;
-}
-
 static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
 {
        unsigned i;
@@ -2407,7 +2393,7 @@ static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
        struct page_list *pl;
        unsigned i;
 
-       pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO);
+       pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
        if (!pl)
                return NULL;
 
@@ -2437,7 +2423,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
        struct scatterlist **sl;
        unsigned i;
 
-       sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO);
+       sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO);
        if (!sl)
                return NULL;
 
@@ -2453,7 +2439,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
 
                n_pages = (end_index - start_index + 1);
 
-               s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0);
+               s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL);
                if (!s) {
                        dm_integrity_free_journal_scatterlist(ic, sl);
                        return NULL;
@@ -2617,7 +2603,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                goto bad;
                        }
 
-                       sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0);
+                       sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL);
                        if (!sg) {
                                *error = "Unable to allocate sg list";
                                r = -ENOMEM;
@@ -2673,7 +2659,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                r = -ENOMEM;
                                goto bad;
                        }
-                       ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO);
+                       ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), GFP_KERNEL | __GFP_ZERO);
                        if (!ic->sk_requests) {
                                *error = "Unable to allocate sk requests";
                                r = -ENOMEM;
@@ -2740,7 +2726,7 @@ retest_commit_id:
                r = -ENOMEM;
                goto bad;
        }
-       ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0);
+       ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
        if (!ic->journal_tree) {
                *error = "Could not allocate memory for journal tree";
                r = -ENOMEM;
index 0555b4410e0598a6096642f10978ad6798bc5f98..41852ae287a58c29e675dd4b794f1670f9dc53e8 100644 (file)
@@ -1710,12 +1710,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
        }
 
        /*
-        * Try to avoid low memory issues when a device is suspended.
+        * Use __GFP_HIGH to avoid low memory issues when a device is
+        * suspended and the ioctl is needed to resume it.
         * Use kmalloc() rather than vmalloc() when we can.
         */
        dmi = NULL;
        noio_flag = memalloc_noio_save();
-       dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL);
+       dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH);
        memalloc_noio_restore(noio_flag);
 
        if (!dmi) {
index a95cbb80fb34444144bad346b3e769c625e8c788..e61c45047c25a9ba2683c313fbc2151c9051b178 100644 (file)
@@ -260,7 +260,7 @@ static int mirror_flush(struct dm_target *ti)
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
-               .bi_op_flags = REQ_PREFLUSH,
+               .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = ms->io_client,
index b93476c3ba3f9767fb133fed977e7a888cc0698e..c5534d294773fc0267a1b4c7438a3316e74d417a 100644 (file)
@@ -741,7 +741,8 @@ static void persistent_commit_exception(struct dm_exception_store *store,
        /*
         * Commit exceptions to disk.
         */
-       if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
+       if (ps->valid && area_io(ps, REQ_OP_WRITE,
+                                REQ_PREFLUSH | REQ_FUA | REQ_SYNC))
                ps->valid = 0;
 
        /*
index 97de961a3bfc80d11497c5ac2558ce7ad7a57a7e..1ec9b2c51c076d99ba6003f90eae608d9c9e35af 100644 (file)
@@ -166,7 +166,7 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
                return r;
        }
 
-       if (likely(v->version >= 1))
+       if (likely(v->salt_size && (v->version >= 1)))
                r = verity_hash_update(v, req, v->salt, v->salt_size, res);
 
        return r;
@@ -177,7 +177,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
 {
        int r;
 
-       if (unlikely(!v->version)) {
+       if (unlikely(v->salt_size && (!v->version))) {
                r = verity_hash_update(v, req, v->salt, v->salt_size, res);
 
                if (r < 0) {
index 6ef9500226c0c7d789ed78e6876195f73ef9d6b7..37ccd73c79ecf2eeb4f33b5bc597f88ca5750d4b 100644 (file)
@@ -1657,7 +1657,7 @@ static struct mapped_device *alloc_dev(int minor)
 
        bio_init(&md->flush_bio, NULL, 0);
        md->flush_bio.bi_bdev = md->bdev;
-       md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+       md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
 
        dm_stats_init(&md->stats);
 
index 7299ce2f08a810555a0407a423a512a0f59f190c..03082e17c65cc87af2a44a8020bda6cbdb8b0262 100644 (file)
@@ -1311,8 +1311,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
        cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
        lock_comm(cinfo, 1);
        ret = __sendmsg(cinfo, &cmsg);
-       if (ret)
+       if (ret) {
+               unlock_comm(cinfo);
                return ret;
+       }
        cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
        ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
        cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
index 10367ffe92e3e37704f5e32793ea97175c8b15e6..212a6777ff3172dd9e20401dd7bf87ad5d2f7468 100644 (file)
@@ -765,7 +765,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
            test_bit(FailFast, &rdev->flags) &&
            !test_bit(LastDev, &rdev->flags))
                ff = MD_FAILFAST;
-       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;
+       bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
 
        atomic_inc(&mddev->pending_writes);
        submit_bio(bio);
index 4c00bc248287e4ab89b492225e0d054973725549..0a7af8b0a80a031a99a7af1742e2d64e6df0d106 100644 (file)
@@ -1782,7 +1782,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
        mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
                                             mb, PAGE_SIZE));
        if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
-                         REQ_FUA, false)) {
+                         REQ_SYNC | REQ_FUA, false)) {
                __free_page(page);
                return -EIO;
        }
@@ -2388,7 +2388,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
                mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
                                                     mb, PAGE_SIZE));
                sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
-                            REQ_OP_WRITE, REQ_FUA, false);
+                            REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false);
                sh->log_start = ctx->pos;
                list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
                atomic_inc(&log->stripe_in_journal_count);
index 5d25bebf3328e4967334465916aca3e3c750e447..ccce92e68d7fa5d8258bb7f2ca2bfa1bcd545709 100644 (file)
@@ -907,8 +907,8 @@ static int ppl_write_empty_header(struct ppl_log *log)
        pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
 
        if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
-                         PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_FUA, 0,
-                         false)) {
+                         PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
+                         REQ_FUA, 0, false)) {
                md_error(rdev->mddev, rdev);
                ret = -EIO;
        }
index 9c4f7659f8b1337c99cfd0ab5070012e3f658849..722064689e822f3b876411f076921e244abbec2f 100644 (file)
@@ -4085,10 +4085,15 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
                        set_bit(STRIPE_INSYNC, &sh->state);
                else {
                        atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
-                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
                                /* don't try to repair!! */
                                set_bit(STRIPE_INSYNC, &sh->state);
-                       else {
+                               pr_warn_ratelimited("%s: mismatch sector in range "
+                                                   "%llu-%llu\n", mdname(conf->mddev),
+                                                   (unsigned long long) sh->sector,
+                                                   (unsigned long long) sh->sector +
+                                                   STRIPE_SECTORS);
+                       } else {
                                sh->check_state = check_state_compute_run;
                                set_bit(STRIPE_COMPUTE_RUN, &sh->state);
                                set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
@@ -4237,10 +4242,15 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
                        }
                } else {
                        atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
-                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
                                /* don't try to repair!! */
                                set_bit(STRIPE_INSYNC, &sh->state);
-                       else {
+                               pr_warn_ratelimited("%s: mismatch sector in range "
+                                                   "%llu-%llu\n", mdname(conf->mddev),
+                                                   (unsigned long long) sh->sector,
+                                                   (unsigned long long) sh->sector +
+                                                   STRIPE_SECTORS);
+                       } else {
                                int *target = &sh->ops.target;
 
                                sh->ops.target = -1;
index 57a842ff309747382a928998d0209072476da421..b7731b18ecae1741ebee3be1bf9188659bfeb398 100644 (file)
@@ -493,10 +493,10 @@ static int vdec_h264_get_param(unsigned long h_vdec,
 }
 
 static struct vdec_common_if vdec_h264_if = {
-       vdec_h264_init,
-       vdec_h264_decode,
-       vdec_h264_get_param,
-       vdec_h264_deinit,
+       .init           = vdec_h264_init,
+       .decode         = vdec_h264_decode,
+       .get_param      = vdec_h264_get_param,
+       .deinit         = vdec_h264_deinit,
 };
 
 struct vdec_common_if *get_h264_dec_comm_if(void);
index 6e7a62ae0842c2e69bb65e31bb1fa80e7c9ce44c..b9fad6a488799ebc7fad8b12b6990b9c33d7c60b 100644 (file)
@@ -620,10 +620,10 @@ static void vdec_vp8_deinit(unsigned long h_vdec)
 }
 
 static struct vdec_common_if vdec_vp8_if = {
-       vdec_vp8_init,
-       vdec_vp8_decode,
-       vdec_vp8_get_param,
-       vdec_vp8_deinit,
+       .init           = vdec_vp8_init,
+       .decode         = vdec_vp8_decode,
+       .get_param      = vdec_vp8_get_param,
+       .deinit         = vdec_vp8_deinit,
 };
 
 struct vdec_common_if *get_vp8_dec_comm_if(void);
index 5539b1853f166a611ed678bc1274f55e48f1347c..1daee1207469b3ea9e740676aa80765a4280c118 100644 (file)
@@ -979,10 +979,10 @@ static int vdec_vp9_get_param(unsigned long h_vdec,
 }
 
 static struct vdec_common_if vdec_vp9_if = {
-       vdec_vp9_init,
-       vdec_vp9_decode,
-       vdec_vp9_get_param,
-       vdec_vp9_deinit,
+       .init           = vdec_vp9_init,
+       .decode         = vdec_vp9_decode,
+       .get_param      = vdec_vp9_get_param,
+       .deinit         = vdec_vp9_deinit,
 };
 
 struct vdec_common_if *get_vp9_dec_comm_if(void);
index c862cd4583cc93694747e191f5e3537e5767bfa5..b8069eec18cb44ef335535789f0e2a61ffaf4bd1 100644 (file)
@@ -309,6 +309,9 @@ static inline enum xp_retval
 xpc_send(short partid, int ch_number, u32 flags, void *payload,
         u16 payload_size)
 {
+       if (!xpc_interface.send)
+               return xpNotLoaded;
+
        return xpc_interface.send(partid, ch_number, flags, payload,
                                  payload_size);
 }
@@ -317,6 +320,9 @@ static inline enum xp_retval
 xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
                u16 payload_size, xpc_notify_func func, void *key)
 {
+       if (!xpc_interface.send_notify)
+               return xpNotLoaded;
+
        return xpc_interface.send_notify(partid, ch_number, flags, payload,
                                         payload_size, func, key);
 }
@@ -324,12 +330,16 @@ xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
 static inline void
 xpc_received(short partid, int ch_number, void *payload)
 {
-       return xpc_interface.received(partid, ch_number, payload);
+       if (xpc_interface.received)
+               xpc_interface.received(partid, ch_number, payload);
 }
 
 static inline enum xp_retval
 xpc_partid_to_nasids(short partid, void *nasids)
 {
+       if (!xpc_interface.partid_to_nasids)
+               return xpNotLoaded;
+
        return xpc_interface.partid_to_nasids(partid, nasids);
 }
 
index 01be66d02ca8ce52c84b809fa55a7aeb6b219bc2..6d7f557fd1c1a1e885eb3dad64886b3e0afe32fe 100644 (file)
@@ -69,23 +69,9 @@ struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS];
 EXPORT_SYMBOL_GPL(xpc_registrations);
 
 /*
- * Initialize the XPC interface to indicate that XPC isn't loaded.
+ * Initialize the XPC interface to NULL to indicate that XPC isn't loaded.
  */
-static enum xp_retval
-xpc_notloaded(void)
-{
-       return xpNotLoaded;
-}
-
-struct xpc_interface xpc_interface = {
-       (void (*)(int))xpc_notloaded,
-       (void (*)(int))xpc_notloaded,
-       (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
-       (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
-                          void *))xpc_notloaded,
-       (void (*)(short, int, void *))xpc_notloaded,
-       (enum xp_retval(*)(short, void *))xpc_notloaded
-};
+struct xpc_interface xpc_interface = { };
 EXPORT_SYMBOL_GPL(xpc_interface);
 
 /*
@@ -115,17 +101,7 @@ EXPORT_SYMBOL_GPL(xpc_set_interface);
 void
 xpc_clear_interface(void)
 {
-       xpc_interface.connect = (void (*)(int))xpc_notloaded;
-       xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
-       xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16))
-           xpc_notloaded;
-       xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *,
-                                                      u16, xpc_notify_func,
-                                                      void *))xpc_notloaded;
-       xpc_interface.received = (void (*)(short, int, void *))
-           xpc_notloaded;
-       xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
-           xpc_notloaded;
+       memset(&xpc_interface, 0, sizeof(xpc_interface));
 }
 EXPORT_SYMBOL_GPL(xpc_clear_interface);
 
@@ -188,7 +164,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
        mutex_unlock(&registration->mutex);
 
-       xpc_interface.connect(ch_number);
+       if (xpc_interface.connect)
+               xpc_interface.connect(ch_number);
 
        return xpSuccess;
 }
@@ -237,7 +214,8 @@ xpc_disconnect(int ch_number)
        registration->assigned_limit = 0;
        registration->idle_limit = 0;
 
-       xpc_interface.disconnect(ch_number);
+       if (xpc_interface.disconnect)
+               xpc_interface.disconnect(ch_number);
 
        mutex_unlock(&registration->mutex);
 
index 34c862f213c7e85554830b741cdf051f1fbbd86d..0a9b78705ee810c9e18c6fa1f46551fc27374287 100644 (file)
@@ -29,6 +29,17 @@ static int arm_pmu_acpi_register_irq(int cpu)
                return -EINVAL;
 
        gsi = gicc->performance_interrupt;
+
+       /*
+        * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
+        * have an interrupt. QEMU advertises this by using a GSI of zero,
+        * which is not known to be valid on any hardware despite being
+        * valid per the spec. Take the pragmatic approach and reject a
+        * GSI of zero for now.
+        */
+       if (!gsi)
+               return 0;
+
        if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
                trigger = ACPI_EDGE_SENSITIVE;
        else
index 1653cbda6a8299b33b5cebae92bd4710e41412a4..bd459a93b0e7e9b11c999dd4bf9b95c3500be3e2 100644 (file)
@@ -680,30 +680,16 @@ EXPORT_SYMBOL_GPL(pinctrl_generic_remove_group);
  * pinctrl_generic_free_groups() - removes all pin groups
  * @pctldev: pin controller device
  *
- * Note that the caller must take care of locking.
+ * Note that the caller must take care of locking. The pinctrl groups
+ * are allocated with devm_kzalloc() so no need to free them here.
  */
 static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev)
 {
        struct radix_tree_iter iter;
-       struct group_desc *group;
-       unsigned long *indices;
        void **slot;
-       int i = 0;
-
-       indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
-                              pctldev->num_groups, GFP_KERNEL);
-       if (!indices)
-               return;
 
        radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0)
-               indices[i++] = iter.index;
-
-       for (i = 0; i < pctldev->num_groups; i++) {
-               group = radix_tree_lookup(&pctldev->pin_group_tree,
-                                         indices[i]);
-               radix_tree_delete(&pctldev->pin_group_tree, indices[i]);
-               devm_kfree(pctldev->dev, group);
-       }
+               radix_tree_delete(&pctldev->pin_group_tree, iter.index);
 
        pctldev->num_groups = 0;
 }
index 41b5b07d5a2bf51f6b0623597c294862910de78c..6852010a6d708b5010555cbb141bf57ac31de077 100644 (file)
@@ -194,6 +194,16 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
        return 0;
 }
 
+static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg)
+{
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp &= ~(mask << shift);
+       tmp |= value << shift;
+       writel(tmp, reg);
+}
+
 static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
                               unsigned group)
 {
@@ -211,8 +221,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
                reg += bank * 0x20 + pin / 16 * 0x10;
                shift = pin % 16 * 2;
 
-               writel(0x3 << shift, reg + CLR);
-               writel(g->muxsel[i] << shift, reg + SET);
+               mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg);
        }
 
        return 0;
@@ -279,8 +288,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
                        /* mA */
                        if (config & MA_PRESENT) {
                                shift = pin % 8 * 4;
-                               writel(0x3 << shift, reg + CLR);
-                               writel(ma << shift, reg + SET);
+                               mxs_pinctrl_rmwl(ma, 0x3, shift, reg);
                        }
 
                        /* vol */
index 2debba62fac90d956ce37cd09805c518ee4a8da5..20f1b44939944614ff270c757fc7152f901e9f09 100644 (file)
@@ -1539,15 +1539,29 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
  * is not listed below.
  */
 static const struct dmi_system_id chv_no_valid_mask[] = {
+       /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
        {
-               /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
-               .ident = "Acer Chromebook (CYAN)",
+               .ident = "Intel_Strago based Chromebooks (All models)",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
-                       DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
                },
-       }
+       },
+       {
+               .ident = "Acer Chromebook R11 (Cyan)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
+               },
+       },
+       {
+               .ident = "Samsung Chromebook 3 (Celes)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
+               },
+       },
+       {}
 };
 
 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
index 0d6b7f4b82af34a2a85e51c924ca8420bd7a6268..720a19fd38d2c6c24d30e5e1c7dcdf70cae0e590 100644 (file)
@@ -35,7 +35,6 @@ static const struct pin_config_item conf_items[] = {
        PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
                                "input bias pull to pin specific state", NULL, false),
        PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
-       PCONFDUMP(PIN_CONFIG_BIDIRECTIONAL, "bi-directional pin operations", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false),
@@ -161,7 +160,6 @@ static const struct pinconf_generic_params dt_params[] = {
        { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 },
        { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 },
        { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 },
-       { "bi-directional", PIN_CONFIG_BIDIRECTIONAL, 1 },
        { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 },
        { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 },
        { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 },
@@ -174,7 +172,6 @@ static const struct pinconf_generic_params dt_params[] = {
        { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
        { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
        { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
-       { "output-enable", PIN_CONFIG_OUTPUT, 1, },
        { "output-high", PIN_CONFIG_OUTPUT, 1, },
        { "output-low", PIN_CONFIG_OUTPUT, 0, },
        { "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
index 9fd6d9087dc508ca7731d7f1e868988e0e320cc2..16b3ae5e4f440c4769db55ebf7c61ebae7e1e5c1 100644 (file)
@@ -826,30 +826,17 @@ EXPORT_SYMBOL_GPL(pinmux_generic_remove_function);
  * pinmux_generic_free_functions() - removes all functions
  * @pctldev: pin controller device
  *
- * Note that the caller must take care of locking.
+ * Note that the caller must take care of locking. The pinctrl
+ * functions are allocated with devm_kzalloc() so no need to free
+ * them here.
  */
 void pinmux_generic_free_functions(struct pinctrl_dev *pctldev)
 {
        struct radix_tree_iter iter;
-       struct function_desc *function;
-       unsigned long *indices;
        void **slot;
-       int i = 0;
-
-       indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
-                              pctldev->num_functions, GFP_KERNEL);
-       if (!indices)
-               return;
 
        radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0)
-               indices[i++] = iter.index;
-
-       for (i = 0; i < pctldev->num_functions; i++) {
-               function = radix_tree_lookup(&pctldev->pin_function_tree,
-                                            indices[i]);
-               radix_tree_delete(&pctldev->pin_function_tree, indices[i]);
-               devm_kfree(pctldev->dev, function);
-       }
+               radix_tree_delete(&pctldev->pin_function_tree, iter.index);
 
        pctldev->num_functions = 0;
 }
index 9aec1d2232dd830e2c19a8e1e394e6033e621c21..6624499eae72f5c2ba986c8c54c6f7e583f05f2a 100644 (file)
@@ -394,7 +394,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x3, "owa")),          /* DOUT */
+                 SUNXI_FUNCTION(0x3, "spdif")),        /* DOUT */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out")),
index d390325c99ecf9487c9b4441fd0e25aec05378c7..abf6026645dd2308fba55179ca750b0e786da5fa 100644 (file)
@@ -1170,6 +1170,8 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
                cmd = list_first_entry_or_null(&vscsi->free_cmd,
                                               struct ibmvscsis_cmd, list);
                if (cmd) {
+                       if (cmd->abort_cmd)
+                               cmd->abort_cmd = NULL;
                        cmd->flags &= ~(DELAY_SEND);
                        list_del(&cmd->list);
                        cmd->iue = iue;
@@ -1774,6 +1776,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
                                if (cmd->abort_cmd) {
                                        retry = true;
                                        cmd->abort_cmd->flags &= ~(DELAY_SEND);
+                                       cmd->abort_cmd = NULL;
                                }
 
                                /*
@@ -1788,6 +1791,25 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
                                        list_del(&cmd->list);
                                        ibmvscsis_free_cmd_resources(vscsi,
                                                                     cmd);
+                                       /*
+                                        * With a successfully aborted op
+                                        * through LIO we want to increment the
+                                        * vscsi credit so that when we don't
+                                        * send a rsp to the original scsi abort
+                                        * op (h_send_crq), but the tm rsp to
+                                        * the abort is sent, the credit is
+                                        * correctly sent with the abort tm rsp.
+                                        * We would need 1 for the abort tm rsp
+                                        * and 1 credit for the aborted scsi op.
+                                        * Thus we need to increment here.
+                                        * Also we want to increment the credit
+                                        * here because we want to make sure
+                                        * cmd is actually released first
+                                        * otherwise the client will think
+                                        * it can send a new cmd, and we could
+                                        * find ourselves short of cmd elements.
+                                        */
+                                       vscsi->credit += 1;
                                } else {
                                        iue = cmd->iue;
 
@@ -2962,10 +2984,7 @@ static long srp_build_response(struct scsi_info *vscsi,
 
        rsp->opcode = SRP_RSP;
 
-       if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
-               rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
-       else
-               rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
+       rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
        rsp->tag = cmd->rsp.tag;
        rsp->flags = 0;
 
index 26a9bcd5ee6a40c391195ef13e205d8d936c8a54..0d8f81591bed076fa1f89f7cd27360776488f349 100644 (file)
@@ -3790,6 +3790,8 @@ int iscsi_target_tx_thread(void *arg)
 {
        int ret = 0;
        struct iscsi_conn *conn = arg;
+       bool conn_freed = false;
+
        /*
         * Allow ourselves to be interrupted by SIGINT so that a
         * connection recovery / failure event can be triggered externally.
@@ -3815,12 +3817,14 @@ get_immediate:
                        goto transport_err;
 
                ret = iscsit_handle_response_queue(conn);
-               if (ret == 1)
+               if (ret == 1) {
                        goto get_immediate;
-               else if (ret == -ECONNRESET)
+               } else if (ret == -ECONNRESET) {
+                       conn_freed = true;
                        goto out;
-               else if (ret < 0)
+               } else if (ret < 0) {
                        goto transport_err;
+               }
        }
 
 transport_err:
@@ -3830,8 +3834,13 @@ transport_err:
         * responsible for cleaning up the early connection failure.
         */
        if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
-               iscsit_take_action_for_connection_exit(conn);
+               iscsit_take_action_for_connection_exit(conn, &conn_freed);
 out:
+       if (!conn_freed) {
+               while (!kthread_should_stop()) {
+                       msleep(100);
+               }
+       }
        return 0;
 }
 
@@ -4004,6 +4013,7 @@ int iscsi_target_rx_thread(void *arg)
 {
        int rc;
        struct iscsi_conn *conn = arg;
+       bool conn_freed = false;
 
        /*
         * Allow ourselves to be interrupted by SIGINT so that a
@@ -4016,7 +4026,7 @@ int iscsi_target_rx_thread(void *arg)
         */
        rc = wait_for_completion_interruptible(&conn->rx_login_comp);
        if (rc < 0 || iscsi_target_check_conn_state(conn))
-               return 0;
+               goto out;
 
        if (!conn->conn_transport->iscsit_get_rx_pdu)
                return 0;
@@ -4025,7 +4035,15 @@ int iscsi_target_rx_thread(void *arg)
 
        if (!signal_pending(current))
                atomic_set(&conn->transport_failed, 1);
-       iscsit_take_action_for_connection_exit(conn);
+       iscsit_take_action_for_connection_exit(conn, &conn_freed);
+
+out:
+       if (!conn_freed) {
+               while (!kthread_should_stop()) {
+                       msleep(100);
+               }
+       }
+
        return 0;
 }
 
index 9a96e17bf7cd5f7448c880ffafcaa123730ebe71..7fe2aa73cff69e04f8df8d79e3af1c634fb5ca04 100644 (file)
@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
        }
 }
 
-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
 {
+       *conn_freed = false;
+
        spin_lock_bh(&conn->state_lock);
        if (atomic_read(&conn->connection_exit)) {
                spin_unlock_bh(&conn->state_lock);
@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
        if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
                spin_unlock_bh(&conn->state_lock);
                iscsit_close_connection(conn);
+               *conn_freed = true;
                return;
        }
 
@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
        spin_unlock_bh(&conn->state_lock);
 
        iscsit_handle_connection_cleanup(conn);
+       *conn_freed = true;
 }
index 60e69e2af6eda981efb74e4ac313fb0d031093bd..3822d9cd12302071467af03d4920fda601fdd351 100644 (file)
@@ -15,6 +15,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
 extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
 extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
 
 #endif   /*** ISCSI_TARGET_ERL0_H ***/
index 66238477137bc46d35cade3167451e19f2d401ce..92b96b51d5068e77c45d85a5a4d16efc9ffa5a93 100644 (file)
@@ -1464,5 +1464,9 @@ int iscsi_target_login_thread(void *arg)
                        break;
        }
 
+       while (!kthread_should_stop()) {
+               msleep(100);
+       }
+
        return 0;
 }
index 7ccc9c1cbfd1a664fb4c37a5dd71f305e735f4bb..6f88b31242b0562b297e60fdf61552719ed7a97c 100644 (file)
@@ -493,14 +493,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
 
 static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
 
-static bool iscsi_target_sk_state_check(struct sock *sk)
+static bool __iscsi_target_sk_check_close(struct sock *sk)
 {
        if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
-               pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
+               pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
                        "returning FALSE\n");
-               return false;
+               return true;
        }
-       return true;
+       return false;
+}
+
+static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
+{
+       bool state = false;
+
+       if (conn->sock) {
+               struct sock *sk = conn->sock->sk;
+
+               read_lock_bh(&sk->sk_callback_lock);
+               state = (__iscsi_target_sk_check_close(sk) ||
+                        test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+               read_unlock_bh(&sk->sk_callback_lock);
+       }
+       return state;
+}
+
+static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
+{
+       bool state = false;
+
+       if (conn->sock) {
+               struct sock *sk = conn->sock->sk;
+
+               read_lock_bh(&sk->sk_callback_lock);
+               state = test_bit(flag, &conn->login_flags);
+               read_unlock_bh(&sk->sk_callback_lock);
+       }
+       return state;
+}
+
+static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
+{
+       bool state = false;
+
+       if (conn->sock) {
+               struct sock *sk = conn->sock->sk;
+
+               write_lock_bh(&sk->sk_callback_lock);
+               state = (__iscsi_target_sk_check_close(sk) ||
+                        test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+               if (!state)
+                       clear_bit(flag, &conn->login_flags);
+               write_unlock_bh(&sk->sk_callback_lock);
+       }
+       return state;
 }
 
 static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
@@ -540,6 +586,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 
        pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
                        conn, current->comm, current->pid);
+       /*
+        * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
+        * before initial PDU processing in iscsi_target_start_negotiation()
+        * has completed, go ahead and retry until it's cleared.
+        *
+        * Otherwise if the TCP connection drops while this is occurring,
+        * iscsi_target_start_negotiation() will detect the failure, call
+        * cancel_delayed_work_sync(&conn->login_work), and cleanup the
+        * remaining iscsi connection resources from iscsi_np process context.
+        */
+       if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
+               schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
+               return;
+       }
 
        spin_lock(&tpg->tpg_state_lock);
        state = (tpg->tpg_state == TPG_STATE_ACTIVE);
@@ -547,26 +607,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 
        if (!state) {
                pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
-               iscsi_target_restore_sock_callbacks(conn);
-               iscsi_target_login_drop(conn, login);
-               iscsit_deaccess_np(np, tpg, tpg_np);
-               return;
+               goto err;
        }
 
-       if (conn->sock) {
-               struct sock *sk = conn->sock->sk;
-
-               read_lock_bh(&sk->sk_callback_lock);
-               state = iscsi_target_sk_state_check(sk);
-               read_unlock_bh(&sk->sk_callback_lock);
-
-               if (!state) {
-                       pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
-                       iscsi_target_restore_sock_callbacks(conn);
-                       iscsi_target_login_drop(conn, login);
-                       iscsit_deaccess_np(np, tpg, tpg_np);
-                       return;
-               }
+       if (iscsi_target_sk_check_close(conn)) {
+               pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+               goto err;
        }
 
        conn->login_kworker = current;
@@ -584,34 +630,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
        flush_signals(current);
        conn->login_kworker = NULL;
 
-       if (rc < 0) {
-               iscsi_target_restore_sock_callbacks(conn);
-               iscsi_target_login_drop(conn, login);
-               iscsit_deaccess_np(np, tpg, tpg_np);
-               return;
-       }
+       if (rc < 0)
+               goto err;
 
        pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
                        conn, current->comm, current->pid);
 
        rc = iscsi_target_do_login(conn, login);
        if (rc < 0) {
-               iscsi_target_restore_sock_callbacks(conn);
-               iscsi_target_login_drop(conn, login);
-               iscsit_deaccess_np(np, tpg, tpg_np);
+               goto err;
        } else if (!rc) {
-               if (conn->sock) {
-                       struct sock *sk = conn->sock->sk;
-
-                       write_lock_bh(&sk->sk_callback_lock);
-                       clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
-                       write_unlock_bh(&sk->sk_callback_lock);
-               }
+               if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
+                       goto err;
        } else if (rc == 1) {
                iscsi_target_nego_release(conn);
                iscsi_post_login_handler(np, conn, zero_tsih);
                iscsit_deaccess_np(np, tpg, tpg_np);
        }
+       return;
+
+err:
+       iscsi_target_restore_sock_callbacks(conn);
+       iscsi_target_login_drop(conn, login);
+       iscsit_deaccess_np(np, tpg, tpg_np);
 }
 
 static void iscsi_target_do_cleanup(struct work_struct *work)
@@ -659,31 +700,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
                orig_state_change(sk);
                return;
        }
+       state = __iscsi_target_sk_check_close(sk);
+       pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
+
        if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
                pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
                         " conn: %p\n", conn);
+               if (state)
+                       set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
                write_unlock_bh(&sk->sk_callback_lock);
                orig_state_change(sk);
                return;
        }
-       if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+       if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
                pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
                         conn);
                write_unlock_bh(&sk->sk_callback_lock);
                orig_state_change(sk);
                return;
        }
+       /*
+        * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
+        * but only queue conn->login_work -> iscsi_target_do_login_rx()
+        * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
+        *
+        * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
+        * will detect the dropped TCP connection from delayed workqueue context.
+        *
+        * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
+        * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
+        * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
+        * via iscsi_target_sk_check_and_clear() is responsible for detecting the
+        * dropped TCP connection in iscsi_np process context, and cleaning up
+        * the remaining iscsi connection resources.
+        */
+       if (state) {
+               pr_debug("iscsi_target_sk_state_change got failed state\n");
+               set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+               state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+               write_unlock_bh(&sk->sk_callback_lock);
 
-       state = iscsi_target_sk_state_check(sk);
-       write_unlock_bh(&sk->sk_callback_lock);
-
-       pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
+               orig_state_change(sk);
 
-       if (!state) {
-               pr_debug("iscsi_target_sk_state_change got failed state\n");
-               schedule_delayed_work(&conn->login_cleanup_work, 0);
+               if (!state)
+                       schedule_delayed_work(&conn->login_work, 0);
                return;
        }
+       write_unlock_bh(&sk->sk_callback_lock);
+
        orig_state_change(sk);
 }
 
@@ -946,6 +1010,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
                        if (iscsi_target_handle_csg_one(conn, login) < 0)
                                return -1;
                        if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+                               /*
+                                * Check to make sure the TCP connection has not
+                                * dropped asynchronously while session reinstatement
+                                * was occuring in this kthread context, before
+                                * transitioning to full feature phase operation.
+                                */
+                               if (iscsi_target_sk_check_close(conn))
+                                       return -1;
+
                                login->tsih = conn->sess->tsih;
                                login->login_complete = 1;
                                iscsi_target_restore_sock_callbacks(conn);
@@ -972,21 +1045,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
                break;
        }
 
-       if (conn->sock) {
-               struct sock *sk = conn->sock->sk;
-               bool state;
-
-               read_lock_bh(&sk->sk_callback_lock);
-               state = iscsi_target_sk_state_check(sk);
-               read_unlock_bh(&sk->sk_callback_lock);
-
-               if (!state) {
-                       pr_debug("iscsi_target_do_login() failed state for"
-                                " conn: %p\n", conn);
-                       return -1;
-               }
-       }
-
        return 0;
 }
 
@@ -1255,10 +1313,22 @@ int iscsi_target_start_negotiation(
 
                write_lock_bh(&sk->sk_callback_lock);
                set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+               set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
                write_unlock_bh(&sk->sk_callback_lock);
        }
-
+       /*
+        * If iscsi_target_do_login returns zero to signal more PDU
+        * exchanges are required to complete the login, go ahead and
+        * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
+        * is still active.
+        *
+        * Otherwise if TCP connection dropped asynchronously, go ahead
+        * and perform connection cleanup now.
+        */
        ret = iscsi_target_do_login(conn, login);
+       if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+               ret = -1;
+
        if (ret < 0) {
                cancel_delayed_work_sync(&conn->login_work);
                cancel_delayed_work_sync(&conn->login_cleanup_work);
index 37f57357d4a0827f5669cb89d1619c4651192547..6025935036c976edeeee0d7a91df79a66aa84a2b 100644 (file)
@@ -1160,15 +1160,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
        if (cmd->unknown_data_length) {
                cmd->data_length = size;
        } else if (size != cmd->data_length) {
-               pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+               pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
                        " %u does not match SCSI CDB Length: %u for SAM Opcode:"
                        " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
                                cmd->data_length, size, cmd->t_task_cdb[0]);
 
-               if (cmd->data_direction == DMA_TO_DEVICE &&
-                   cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
-                       pr_err("Rejecting underflow/overflow WRITE data\n");
-                       return TCM_INVALID_CDB_FIELD;
+               if (cmd->data_direction == DMA_TO_DEVICE) {
+                       if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+                               pr_err_ratelimited("Rejecting underflow/overflow"
+                                                  " for WRITE data CDB\n");
+                               return TCM_INVALID_CDB_FIELD;
+                       }
+                       /*
+                        * Some fabric drivers like iscsi-target still expect to
+                        * always reject overflow writes.  Reject this case until
+                        * full fabric driver level support for overflow writes
+                        * is introduced tree-wide.
+                        */
+                       if (size > cmd->data_length) {
+                               pr_err_ratelimited("Rejecting overflow for"
+                                                  " WRITE control CDB\n");
+                               return TCM_INVALID_CDB_FIELD;
+                       }
                }
                /*
                 * Reject READ_* or WRITE_* with overflow/underflow for
index 9045837f748bd3b602256cfa9e83058ae92b8b33..beb5f098f32d6f7bb5851deb810065ab37e4ac4a 100644 (file)
@@ -97,7 +97,7 @@ struct tcmu_hba {
 
 struct tcmu_dev {
        struct list_head node;
-
+       struct kref kref;
        struct se_device se_dev;
 
        char *name;
@@ -969,6 +969,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
        udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
        if (!udev)
                return NULL;
+       kref_init(&udev->kref);
 
        udev->name = kstrdup(name, GFP_KERNEL);
        if (!udev->name) {
@@ -1145,6 +1146,24 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
        return 0;
 }
 
+static void tcmu_dev_call_rcu(struct rcu_head *p)
+{
+       struct se_device *dev = container_of(p, struct se_device, rcu_head);
+       struct tcmu_dev *udev = TCMU_DEV(dev);
+
+       kfree(udev->uio_info.name);
+       kfree(udev->name);
+       kfree(udev);
+}
+
+static void tcmu_dev_kref_release(struct kref *kref)
+{
+       struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
+       struct se_device *dev = &udev->se_dev;
+
+       call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+}
+
 static int tcmu_release(struct uio_info *info, struct inode *inode)
 {
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
@@ -1152,7 +1171,8 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
        clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
 
        pr_debug("close\n");
-
+       /* release ref from configure */
+       kref_put(&udev->kref, tcmu_dev_kref_release);
        return 0;
 }
 
@@ -1272,6 +1292,12 @@ static int tcmu_configure_device(struct se_device *dev)
                dev->dev_attrib.hw_max_sectors = 128;
        dev->dev_attrib.hw_queue_depth = 128;
 
+       /*
+        * Get a ref incase userspace does a close on the uio device before
+        * LIO has initiated tcmu_free_device.
+        */
+       kref_get(&udev->kref);
+
        ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
                                 udev->uio_info.uio_dev->minor);
        if (ret)
@@ -1284,11 +1310,13 @@ static int tcmu_configure_device(struct se_device *dev)
        return 0;
 
 err_netlink:
+       kref_put(&udev->kref, tcmu_dev_kref_release);
        uio_unregister_device(&udev->uio_info);
 err_register:
        vfree(udev->mb_addr);
 err_vzalloc:
        kfree(info->name);
+       info->name = NULL;
 
        return ret;
 }
@@ -1302,14 +1330,6 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
        return -EINVAL;
 }
 
-static void tcmu_dev_call_rcu(struct rcu_head *p)
-{
-       struct se_device *dev = container_of(p, struct se_device, rcu_head);
-       struct tcmu_dev *udev = TCMU_DEV(dev);
-
-       kfree(udev);
-}
-
 static bool tcmu_dev_configured(struct tcmu_dev *udev)
 {
        return udev->uio_info.uio_dev ? true : false;
@@ -1364,10 +1384,10 @@ static void tcmu_free_device(struct se_device *dev)
                                   udev->uio_info.uio_dev->minor);
 
                uio_unregister_device(&udev->uio_info);
-               kfree(udev->uio_info.name);
-               kfree(udev->name);
        }
-       call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+
+       /* release ref from init */
+       kref_put(&udev->kref, tcmu_dev_kref_release);
 }
 
 enum {
index c22eaf162f95c1456563b31a8362da9e531c9185..2a6889b3585f068c73091d8895639b7e941d702a 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1154,6 +1154,17 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
                goto out;
        }
 
+       /*
+        * It is possible, particularly with mixed reads & writes to private
+        * mappings, that we have raced with a PMD fault that overlaps with
+        * the PTE we need to set up.  If so just return and the fault will be
+        * retried.
+        */
+       if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
+               vmf_ret = VM_FAULT_NOPAGE;
+               goto unlock_entry;
+       }
+
        /*
         * Note that we don't bother to use iomap_apply here: DAX required
         * the file system block size to be equal the page size, which means
@@ -1397,6 +1408,18 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
        if (IS_ERR(entry))
                goto fallback;
 
+       /*
+        * It is possible, particularly with mixed reads & writes to private
+        * mappings, that we have raced with a PTE fault that overlaps with
+        * the PMD we need to set up.  If so just return and the fault will be
+        * retried.
+        */
+       if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
+                       !pmd_devmap(*vmf->pmd)) {
+               result = 0;
+               goto unlock_entry;
+       }
+
        /*
         * Note that we don't use iomap_apply here.  We aren't doing I/O, only
         * setting up a mapping, so really we're using iomap_begin() as a way
index f865b96374df2b5c40ecfb13663154499ec09b31..d2955daf17a4fcefa2ded3a412f67732315de12a 100644 (file)
@@ -659,7 +659,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
        struct gfs2_log_header *lh;
        unsigned int tail;
        u32 hash;
-       int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META;
+       int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
        lh = page_address(page);
index 1a224a33a6c23c362e1bbacb8150bb9bbf02b3fd..e5686be67be8d361a32344e3aaaae235d739ffd7 100644 (file)
@@ -246,7 +246,7 @@ struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
 
        devname = nfs_devname(dentry, page, PAGE_SIZE);
        if (IS_ERR(devname))
-               mnt = (struct vfsmount *)devname;
+               mnt = ERR_CAST(devname);
        else
                mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata);
 
index 12feac6ee2fd461a46c7b06b7a0ed0359fb4dfd1..452334694a5d1f37cc480e5d1cf2873c4246019d 100644 (file)
@@ -334,11 +334,8 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
        if (!p)
                return 0;
        p = xdr_decode_hyper(p, &args->offset);
-       args->count = ntohl(*p++);
-
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
 
+       args->count = ntohl(*p++);
        len = min(args->count, max_blocksize);
 
        /* set up the kvec */
@@ -352,7 +349,7 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
                v++;
        }
        args->vlen = v;
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -544,11 +541,9 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p,
        p = decode_fh(p, &args->fh);
        if (!p)
                return 0;
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -574,14 +569,10 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
        args->verf   = p; p += 2;
        args->dircount = ~0;
        args->count  = ntohl(*p++);
-
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
-
        args->count  = min_t(u32, args->count, PAGE_SIZE);
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -599,9 +590,6 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
        args->dircount = ntohl(*p++);
        args->count    = ntohl(*p++);
 
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
-
        len = args->count = min(args->count, max_blocksize);
        while (len > 0) {
                struct page *p = *(rqstp->rq_next_page++);
@@ -609,7 +597,8 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
                        args->buffer = page_address(p);
                len -= PAGE_SIZE;
        }
-       return 1;
+
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
index c453a1998e003d3e900407b266f1a15de5d5d94b..dadb3bf305b22f352a3f91a2df06b30284b4891c 100644 (file)
@@ -1769,6 +1769,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
                        opdesc->op_get_currentstateid(cstate, &op->u);
                op->status = opdesc->op_func(rqstp, cstate, &op->u);
 
+               /* Only from SEQUENCE */
+               if (cstate->status == nfserr_replay_cache) {
+                       dprintk("%s NFS4.1 replay from cache\n", __func__);
+                       status = op->status;
+                       goto out;
+               }
                if (!op->status) {
                        if (opdesc->op_set_currentstateid)
                                opdesc->op_set_currentstateid(cstate, &op->u);
@@ -1779,14 +1785,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
                        if (need_wrongsec_check(rqstp))
                                op->status = check_nfsd_access(current_fh->fh_export, rqstp);
                }
-
 encode_op:
-               /* Only from SEQUENCE */
-               if (cstate->status == nfserr_replay_cache) {
-                       dprintk("%s NFS4.1 replay from cache\n", __func__);
-                       status = op->status;
-                       goto out;
-               }
                if (op->status == nfserr_replay_me) {
                        op->replay = &cstate->replay_owner->so_replay;
                        nfsd4_encode_replay(&resp->xdr, op);
index 6a4947a3f4fa82be4118e4ed538a171118f4baa8..de07ff625777820fefc98bfa56adea81962e8135 100644 (file)
@@ -257,9 +257,6 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
        len = args->count     = ntohl(*p++);
        p++; /* totalcount - unused */
 
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
-
        len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2);
 
        /* set up somewhere to store response.
@@ -275,7 +272,7 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
                v++;
        }
        args->vlen = v;
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -365,11 +362,9 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readli
        p = decode_fh(p, &args->fh);
        if (!p)
                return 0;
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -407,11 +402,9 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
        args->cookie = ntohl(*p++);
        args->count  = ntohl(*p++);
        args->count  = min_t(u32, args->count, PAGE_SIZE);
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 /*
index 358258364616cd3c2fee997daca2a192719cb045..4690cd75d8d7948a056fe899bc4600ade10b8566 100644 (file)
@@ -159,7 +159,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
                                        PTR_ERR(dent_inode));
                kfree(name);
                /* Return the error code. */
-               return (struct dentry *)dent_inode;
+               return ERR_CAST(dent_inode);
        }
        /* It is guaranteed that @name is no longer allocated at this point. */
        if (MREF_ERR(mref) == -ENOENT) {
index 827fc9809bc271f09b2c3b7abf4019c31d0e1636..9f88188060db9c7fa59e6882ecf33b55cf921788 100644 (file)
@@ -119,7 +119,7 @@ check_err:
 
        if (IS_ERR(inode)) {
                mlog_errno(PTR_ERR(inode));
-               result = (void *)inode;
+               result = ERR_CAST(inode);
                goto bail;
        }
 
index 0daac5112f7a32384b5febd39bd8499f31da8c31..c0c9683934b7a7883ab59eb8bbcd412e07385d0b 100644 (file)
@@ -1,5 +1,6 @@
 config OVERLAY_FS
        tristate "Overlay filesystem support"
+       select EXPORTFS
        help
          An overlay filesystem combines two filesystems - an 'upper' filesystem
          and a 'lower' filesystem.  When a name exists in both filesystems, the
index 9008ab9fbd2ebe89d419c249455eb740c48a9eb1..7a44533f4bbf24134a95bdc030bde5779f28457a 100644 (file)
@@ -300,7 +300,11 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
                        return PTR_ERR(fh);
        }
 
-       err = ovl_do_setxattr(upper, OVL_XATTR_ORIGIN, fh, fh ? fh->len : 0, 0);
+       /*
+        * Do not fail when upper doesn't support xattrs.
+        */
+       err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh,
+                                fh ? fh->len : 0, 0);
        kfree(fh);
 
        return err;
@@ -342,13 +346,14 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
        if (tmpfile)
                temp = ovl_do_tmpfile(upperdir, stat->mode);
        else
-               temp = ovl_lookup_temp(workdir, dentry);
-       err = PTR_ERR(temp);
-       if (IS_ERR(temp))
-               goto out1;
-
+               temp = ovl_lookup_temp(workdir);
        err = 0;
-       if (!tmpfile)
+       if (IS_ERR(temp)) {
+               err = PTR_ERR(temp);
+               temp = NULL;
+       }
+
+       if (!err && !tmpfile)
                err = ovl_create_real(wdir, temp, &cattr, NULL, true);
 
        if (new_creds) {
@@ -454,6 +459,11 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
        ovl_path_upper(parent, &parentpath);
        upperdir = parentpath.dentry;
 
+       /* Mark parent "impure" because it may now contain non-pure upper */
+       err = ovl_set_impure(parent, upperdir);
+       if (err)
+               return err;
+
        err = vfs_getattr(&parentpath, &pstat,
                          STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT);
        if (err)
index 723b98b9069876d1656b74735dabcaf01484e26d..a63a71656e9bdaef6ed5cadf8acdb6d8002fe1b6 100644 (file)
@@ -41,7 +41,7 @@ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
        }
 }
 
-struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry)
+struct dentry *ovl_lookup_temp(struct dentry *workdir)
 {
        struct dentry *temp;
        char name[20];
@@ -68,7 +68,7 @@ static struct dentry *ovl_whiteout(struct dentry *workdir,
        struct dentry *whiteout;
        struct inode *wdir = workdir->d_inode;
 
-       whiteout = ovl_lookup_temp(workdir, dentry);
+       whiteout = ovl_lookup_temp(workdir);
        if (IS_ERR(whiteout))
                return whiteout;
 
@@ -127,17 +127,28 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry,
        return err;
 }
 
-static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry)
+static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper,
+                              int xerr)
 {
        int err;
 
-       err = ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
+       err = ovl_check_setxattr(dentry, upper, OVL_XATTR_OPAQUE, "y", 1, xerr);
        if (!err)
                ovl_dentry_set_opaque(dentry);
 
        return err;
 }
 
+static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry)
+{
+       /*
+        * Fail with -EIO when trying to create opaque dir and upper doesn't
+        * support xattrs. ovl_rename() calls ovl_set_opaque_xerr(-EXDEV) to
+        * return a specific error for noxattr case.
+        */
+       return ovl_set_opaque_xerr(dentry, upperdentry, -EIO);
+}
+
 /* Common operations required to be done after creation of file on upper */
 static void ovl_instantiate(struct dentry *dentry, struct inode *inode,
                            struct dentry *newdentry, bool hardlink)
@@ -162,6 +173,11 @@ static bool ovl_type_merge(struct dentry *dentry)
        return OVL_TYPE_MERGE(ovl_path_type(dentry));
 }
 
+static bool ovl_type_origin(struct dentry *dentry)
+{
+       return OVL_TYPE_ORIGIN(ovl_path_type(dentry));
+}
+
 static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
                            struct cattr *attr, struct dentry *hardlink)
 {
@@ -250,7 +266,7 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
        if (upper->d_parent->d_inode != udir)
                goto out_unlock;
 
-       opaquedir = ovl_lookup_temp(workdir, dentry);
+       opaquedir = ovl_lookup_temp(workdir);
        err = PTR_ERR(opaquedir);
        if (IS_ERR(opaquedir))
                goto out_unlock;
@@ -382,7 +398,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
        if (err)
                goto out;
 
-       newdentry = ovl_lookup_temp(workdir, dentry);
+       newdentry = ovl_lookup_temp(workdir);
        err = PTR_ERR(newdentry);
        if (IS_ERR(newdentry))
                goto out_unlock;
@@ -846,18 +862,16 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
        if (IS_ERR(redirect))
                return PTR_ERR(redirect);
 
-       err = ovl_do_setxattr(ovl_dentry_upper(dentry), OVL_XATTR_REDIRECT,
-                             redirect, strlen(redirect), 0);
+       err = ovl_check_setxattr(dentry, ovl_dentry_upper(dentry),
+                                OVL_XATTR_REDIRECT,
+                                redirect, strlen(redirect), -EXDEV);
        if (!err) {
                spin_lock(&dentry->d_lock);
                ovl_dentry_set_redirect(dentry, redirect);
                spin_unlock(&dentry->d_lock);
        } else {
                kfree(redirect);
-               if (err == -EOPNOTSUPP)
-                       ovl_clear_redirect_dir(dentry->d_sb);
-               else
-                       pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
+               pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
                /* Fall back to userspace copy-up */
                err = -EXDEV;
        }
@@ -943,6 +957,25 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
        old_upperdir = ovl_dentry_upper(old->d_parent);
        new_upperdir = ovl_dentry_upper(new->d_parent);
 
+       if (!samedir) {
+               /*
+                * When moving a merge dir or non-dir with copy up origin into
+                * a new parent, we are marking the new parent dir "impure".
+                * When ovl_iterate() iterates an "impure" upper dir, it will
+                * lookup the origin inodes of the entries to fill d_ino.
+                */
+               if (ovl_type_origin(old)) {
+                       err = ovl_set_impure(new->d_parent, new_upperdir);
+                       if (err)
+                               goto out_revert_creds;
+               }
+               if (!overwrite && ovl_type_origin(new)) {
+                       err = ovl_set_impure(old->d_parent, old_upperdir);
+                       if (err)
+                               goto out_revert_creds;
+               }
+       }
+
        trap = lock_rename(new_upperdir, old_upperdir);
 
        olddentry = lookup_one_len(old->d_name.name, old_upperdir,
@@ -992,7 +1025,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
                if (ovl_type_merge_or_lower(old))
                        err = ovl_set_redirect(old, samedir);
                else if (!old_opaque && ovl_type_merge(new->d_parent))
-                       err = ovl_set_opaque(old, olddentry);
+                       err = ovl_set_opaque_xerr(old, olddentry, -EXDEV);
                if (err)
                        goto out_dput;
        }
@@ -1000,7 +1033,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
                if (ovl_type_merge_or_lower(new))
                        err = ovl_set_redirect(new, samedir);
                else if (!new_opaque && ovl_type_merge(old->d_parent))
-                       err = ovl_set_opaque(new, newdentry);
+                       err = ovl_set_opaque_xerr(new, newdentry, -EXDEV);
                if (err)
                        goto out_dput;
        }
index ad9547f82da57fa4bd51eb5738cb8f02235e4455..d613e2c41242a52a6c018f43f9987bdbf461e0bb 100644 (file)
@@ -240,6 +240,16 @@ int ovl_xattr_get(struct dentry *dentry, const char *name,
        return res;
 }
 
+static bool ovl_can_list(const char *s)
+{
+       /* List all non-trusted xatts */
+       if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
+               return true;
+
+       /* Never list trusted.overlay, list other trusted for superuser only */
+       return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
+}
+
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
        struct dentry *realdentry = ovl_dentry_real(dentry);
@@ -263,7 +273,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
                        return -EIO;
 
                len -= slen;
-               if (ovl_is_private_xattr(s)) {
+               if (!ovl_can_list(s)) {
                        res -= slen;
                        memmove(s, s + slen, len);
                } else {
index bad0f665a63521efde00b4c488d4ed2ba85a5b75..f3136c31e72af24cbb9949449a12d292fc3bf11b 100644 (file)
@@ -169,17 +169,7 @@ invalid:
 
 static bool ovl_is_opaquedir(struct dentry *dentry)
 {
-       int res;
-       char val;
-
-       if (!d_is_dir(dentry))
-               return false;
-
-       res = vfs_getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
-       if (res == 1 && val == 'y')
-               return true;
-
-       return false;
+       return ovl_check_dir_xattr(dentry, OVL_XATTR_OPAQUE);
 }
 
 static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
@@ -351,6 +341,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
        unsigned int ctr = 0;
        struct inode *inode = NULL;
        bool upperopaque = false;
+       bool upperimpure = false;
        char *upperredirect = NULL;
        struct dentry *this;
        unsigned int i;
@@ -395,6 +386,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                                poe = roe;
                }
                upperopaque = d.opaque;
+               if (upperdentry && d.is_dir)
+                       upperimpure = ovl_is_impuredir(upperdentry);
        }
 
        if (!d.stop && poe->numlower) {
@@ -463,6 +456,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 
        revert_creds(old_cred);
        oe->opaque = upperopaque;
+       oe->impure = upperimpure;
        oe->redirect = upperredirect;
        oe->__upperdentry = upperdentry;
        memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
index caa36cb9c46de9838805dc40e21672217407d40e..0623cebeefff8661d49d65a228ceec6290cee877 100644 (file)
@@ -24,6 +24,7 @@ enum ovl_path_type {
 #define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque"
 #define OVL_XATTR_REDIRECT OVL_XATTR_PREFIX "redirect"
 #define OVL_XATTR_ORIGIN OVL_XATTR_PREFIX "origin"
+#define OVL_XATTR_IMPURE OVL_XATTR_PREFIX "impure"
 
 /*
  * The tuple (fh,uuid) is a universal unique identifier for a copy up origin,
@@ -203,10 +204,10 @@ struct dentry *ovl_dentry_real(struct dentry *dentry);
 struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry);
 void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache);
 bool ovl_dentry_is_opaque(struct dentry *dentry);
+bool ovl_dentry_is_impure(struct dentry *dentry);
 bool ovl_dentry_is_whiteout(struct dentry *dentry);
 void ovl_dentry_set_opaque(struct dentry *dentry);
 bool ovl_redirect_dir(struct super_block *sb);
-void ovl_clear_redirect_dir(struct super_block *sb);
 const char *ovl_dentry_get_redirect(struct dentry *dentry);
 void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect);
 void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
@@ -219,6 +220,17 @@ bool ovl_is_whiteout(struct dentry *dentry);
 struct file *ovl_path_open(struct path *path, int flags);
 int ovl_copy_up_start(struct dentry *dentry);
 void ovl_copy_up_end(struct dentry *dentry);
+bool ovl_check_dir_xattr(struct dentry *dentry, const char *name);
+int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
+                      const char *name, const void *value, size_t size,
+                      int xerr);
+int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry);
+
+static inline bool ovl_is_impuredir(struct dentry *dentry)
+{
+       return ovl_check_dir_xattr(dentry, OVL_XATTR_IMPURE);
+}
+
 
 /* namei.c */
 int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
@@ -263,7 +275,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
 
 /* dir.c */
 extern const struct inode_operations ovl_dir_inode_operations;
-struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry);
+struct dentry *ovl_lookup_temp(struct dentry *workdir);
 struct cattr {
        dev_t rdev;
        umode_t mode;
index b2023ddb85323725b8bbfa687f31fc854c1ba5e9..34bc4a9f5c61d95f049b3f34ccd0de243aa27ddd 100644 (file)
@@ -28,6 +28,7 @@ struct ovl_fs {
        /* creds of process who forced instantiation of super block */
        const struct cred *creator_cred;
        bool tmpfile;
+       bool noxattr;
        wait_queue_head_t copyup_wq;
        /* sb common to all layers */
        struct super_block *same_sb;
@@ -42,6 +43,7 @@ struct ovl_entry {
                        u64 version;
                        const char *redirect;
                        bool opaque;
+                       bool impure;
                        bool copying;
                };
                struct rcu_head rcu;
index 9828b7de89992e64a1900a277b58423dde7b992a..4882ffb37baead1c4da41684158d22cbe58e5353 100644 (file)
@@ -891,6 +891,19 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                                dput(temp);
                        else
                                pr_warn("overlayfs: upper fs does not support tmpfile.\n");
+
+                       /*
+                        * Check if upper/work fs supports trusted.overlay.*
+                        * xattr
+                        */
+                       err = ovl_do_setxattr(ufs->workdir, OVL_XATTR_OPAQUE,
+                                             "0", 1, 0);
+                       if (err) {
+                               ufs->noxattr = true;
+                               pr_warn("overlayfs: upper fs does not support xattr.\n");
+                       } else {
+                               vfs_removexattr(ufs->workdir, OVL_XATTR_OPAQUE);
+                       }
                }
        }
 
@@ -961,7 +974,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        path_put(&workpath);
        kfree(lowertmp);
 
-       oe->__upperdentry = upperpath.dentry;
+       if (upperpath.dentry) {
+               oe->__upperdentry = upperpath.dentry;
+               oe->impure = ovl_is_impuredir(upperpath.dentry);
+       }
        for (i = 0; i < numlower; i++) {
                oe->lowerstack[i].dentry = stack[i].dentry;
                oe->lowerstack[i].mnt = ufs->lower_mnt[i];
index cfdea47313a10e22a9c06193e4cc422891badaae..809048913889189d083339d1d015ef4cad2af035 100644 (file)
@@ -175,6 +175,13 @@ bool ovl_dentry_is_opaque(struct dentry *dentry)
        return oe->opaque;
 }
 
+bool ovl_dentry_is_impure(struct dentry *dentry)
+{
+       struct ovl_entry *oe = dentry->d_fsdata;
+
+       return oe->impure;
+}
+
 bool ovl_dentry_is_whiteout(struct dentry *dentry)
 {
        return !dentry->d_inode && ovl_dentry_is_opaque(dentry);
@@ -191,14 +198,7 @@ bool ovl_redirect_dir(struct super_block *sb)
 {
        struct ovl_fs *ofs = sb->s_fs_info;
 
-       return ofs->config.redirect_dir;
-}
-
-void ovl_clear_redirect_dir(struct super_block *sb)
-{
-       struct ovl_fs *ofs = sb->s_fs_info;
-
-       ofs->config.redirect_dir = false;
+       return ofs->config.redirect_dir && !ofs->noxattr;
 }
 
 const char *ovl_dentry_get_redirect(struct dentry *dentry)
@@ -303,3 +303,59 @@ void ovl_copy_up_end(struct dentry *dentry)
        wake_up_locked(&ofs->copyup_wq);
        spin_unlock(&ofs->copyup_wq.lock);
 }
+
+bool ovl_check_dir_xattr(struct dentry *dentry, const char *name)
+{
+       int res;
+       char val;
+
+       if (!d_is_dir(dentry))
+               return false;
+
+       res = vfs_getxattr(dentry, name, &val, 1);
+       if (res == 1 && val == 'y')
+               return true;
+
+       return false;
+}
+
+int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
+                      const char *name, const void *value, size_t size,
+                      int xerr)
+{
+       int err;
+       struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
+
+       if (ofs->noxattr)
+               return xerr;
+
+       err = ovl_do_setxattr(upperdentry, name, value, size, 0);
+
+       if (err == -EOPNOTSUPP) {
+               pr_warn("overlayfs: cannot set %s xattr on upper\n", name);
+               ofs->noxattr = true;
+               return xerr;
+       }
+
+       return err;
+}
+
+int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry)
+{
+       int err;
+       struct ovl_entry *oe = dentry->d_fsdata;
+
+       if (oe->impure)
+               return 0;
+
+       /*
+        * Do not fail when upper doesn't support xattrs.
+        * Upper inodes won't have origin nor redirect xattr anyway.
+        */
+       err = ovl_check_setxattr(dentry, upperdentry, OVL_XATTR_IMPURE,
+                                "y", 1, 0);
+       if (!err)
+               oe->impure = true;
+
+       return err;
+}
index 45f6bf68fff3ed30df0f85d98c0f644c320a9bf7..f1e1927ccd484e7372fe2a38db7455468bbf06e8 100644 (file)
@@ -821,7 +821,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
        if (!mmget_not_zero(mm))
                goto free;
 
-       flags = write ? FOLL_WRITE : 0;
+       flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
 
        while (count > 0) {
                int this_len = min_t(int, count, PAGE_SIZE);
index da01f497180a165d163935c4744ef82521bf9151..39bb1e838d8da683fa64dadf7ff150b9369d699e 100644 (file)
@@ -1112,7 +1112,7 @@ static int flush_commit_list(struct super_block *s,
                depth = reiserfs_write_unlock_nested(s);
                if (reiserfs_barrier_flush(s))
                        __sync_dirty_buffer(jl->j_commit_bh,
-                                       REQ_PREFLUSH | REQ_FUA);
+                                       REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
                else
                        sync_dirty_buffer(jl->j_commit_bh);
                reiserfs_write_lock_nested(s, depth);
@@ -1271,7 +1271,7 @@ static int _update_journal_header_block(struct super_block *sb,
 
                if (reiserfs_barrier_flush(sb))
                        __sync_dirty_buffer(journal->j_header_bh,
-                                       REQ_PREFLUSH | REQ_FUA);
+                                       REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
                else
                        sync_dirty_buffer(journal->j_header_bh);
 
index 62fa39276a24bd91c26e3aa18c9f162f19842b95..07b77b73b0240c5cca4187d29e8e403cbf4f0714 100644 (file)
@@ -97,12 +97,16 @@ static inline void
 xfs_buf_ioacct_inc(
        struct xfs_buf  *bp)
 {
-       if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
+       if (bp->b_flags & XBF_NO_IOACCT)
                return;
 
        ASSERT(bp->b_flags & XBF_ASYNC);
-       bp->b_flags |= _XBF_IN_FLIGHT;
-       percpu_counter_inc(&bp->b_target->bt_io_count);
+       spin_lock(&bp->b_lock);
+       if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
+               bp->b_state |= XFS_BSTATE_IN_FLIGHT;
+               percpu_counter_inc(&bp->b_target->bt_io_count);
+       }
+       spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -110,14 +114,24 @@ xfs_buf_ioacct_inc(
  * freed and unaccount from the buftarg.
  */
 static inline void
-xfs_buf_ioacct_dec(
+__xfs_buf_ioacct_dec(
        struct xfs_buf  *bp)
 {
-       if (!(bp->b_flags & _XBF_IN_FLIGHT))
-               return;
+       ASSERT(spin_is_locked(&bp->b_lock));
 
-       bp->b_flags &= ~_XBF_IN_FLIGHT;
-       percpu_counter_dec(&bp->b_target->bt_io_count);
+       if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
+               bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
+               percpu_counter_dec(&bp->b_target->bt_io_count);
+       }
+}
+
+static inline void
+xfs_buf_ioacct_dec(
+       struct xfs_buf  *bp)
+{
+       spin_lock(&bp->b_lock);
+       __xfs_buf_ioacct_dec(bp);
+       spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -149,9 +163,9 @@ xfs_buf_stale(
         * unaccounted (released to LRU) before that occurs. Drop in-flight
         * status now to preserve accounting consistency.
         */
-       xfs_buf_ioacct_dec(bp);
-
        spin_lock(&bp->b_lock);
+       __xfs_buf_ioacct_dec(bp);
+
        atomic_set(&bp->b_lru_ref, 0);
        if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
            (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -979,12 +993,12 @@ xfs_buf_rele(
                 * ensures the decrement occurs only once per-buf.
                 */
                if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
-                       xfs_buf_ioacct_dec(bp);
+                       __xfs_buf_ioacct_dec(bp);
                goto out_unlock;
        }
 
        /* the last reference has been dropped ... */
-       xfs_buf_ioacct_dec(bp);
+       __xfs_buf_ioacct_dec(bp);
        if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
                /*
                 * If the buffer is added to the LRU take a new reference to the
index 8d1d44f87ce98834ad67cee6840d96d07d5666c0..1508121f29f29191da1a4efc7c8f12cf42eb0107 100644 (file)
@@ -63,7 +63,6 @@ typedef enum {
 #define _XBF_KMEM       (1 << 21)/* backed by heap memory */
 #define _XBF_DELWRI_Q   (1 << 22)/* buffer on a delwri queue */
 #define _XBF_COMPOUND   (1 << 23)/* compound buffer */
-#define _XBF_IN_FLIGHT  (1 << 25) /* I/O in flight, for accounting purposes */
 
 typedef unsigned int xfs_buf_flags_t;
 
@@ -84,14 +83,14 @@ typedef unsigned int xfs_buf_flags_t;
        { _XBF_PAGES,           "PAGES" }, \
        { _XBF_KMEM,            "KMEM" }, \
        { _XBF_DELWRI_Q,        "DELWRI_Q" }, \
-       { _XBF_COMPOUND,        "COMPOUND" }, \
-       { _XBF_IN_FLIGHT,       "IN_FLIGHT" }
+       { _XBF_COMPOUND,        "COMPOUND" }
 
 
 /*
  * Internal state flags.
  */
 #define XFS_BSTATE_DISPOSE      (1 << 0)       /* buffer being discarded */
+#define XFS_BSTATE_IN_FLIGHT    (1 << 1)       /* I/O in flight */
 
 /*
  * The xfs_buftarg contains 2 notions of "sector size" -
index c0bd0d7651a947bf06407d7c680dde5c377b9b26..bb837310c07e98c529472abceda0a8b426d8c9a5 100644 (file)
@@ -913,4 +913,55 @@ void drm_dp_aux_unregister(struct drm_dp_aux *aux);
 int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
 int drm_dp_stop_crc(struct drm_dp_aux *aux);
 
+struct drm_dp_dpcd_ident {
+       u8 oui[3];
+       u8 device_id[6];
+       u8 hw_rev;
+       u8 sw_major_rev;
+       u8 sw_minor_rev;
+} __packed;
+
+/**
+ * struct drm_dp_desc - DP branch/sink device descriptor
+ * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch).
+ * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks.
+ */
+struct drm_dp_desc {
+       struct drm_dp_dpcd_ident ident;
+       u32 quirks;
+};
+
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+                    bool is_branch);
+
+/**
+ * enum drm_dp_quirk - Display Port sink/branch device specific quirks
+ *
+ * Display Port sink and branch devices in the wild have a variety of bugs, try
+ * to collect them here. The quirks are shared, but it's up to the drivers to
+ * implement workarounds for them.
+ */
+enum drm_dp_quirk {
+       /**
+        * @DP_DPCD_QUIRK_LIMITED_M_N:
+        *
+        * The device requires main link attributes Mvid and Nvid to be limited
+        * to 16 bits.
+        */
+       DP_DPCD_QUIRK_LIMITED_M_N,
+};
+
+/**
+ * drm_dp_has_quirk() - does the DP device have a specific quirk
+ * @desc: Device descriptor filled by drm_dp_read_desc()
+ * @quirk: Quirk to query for
+ *
+ * Return true if DP device identified by @desc has @quirk.
+ */
+static inline bool
+drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
+{
+       return desc->quirks & BIT(quirk);
+}
+
 #endif /* _DRM_DP_HELPER_H_ */
index 2b1a44f5bdb60e6a28d874630f71dec7a0f4c40b..a89d37e8b3873cc8e6fa79389a6c0d3a2f4843ab 100644 (file)
@@ -41,7 +41,7 @@ struct vm_area_struct;
 #define ___GFP_WRITE           0x800000u
 #define ___GFP_KSWAPD_RECLAIM  0x1000000u
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP       0x4000000u
+#define ___GFP_NOLOCKDEP       0x2000000u
 #else
 #define ___GFP_NOLOCKDEP       0
 #endif
index c0d712d22b079ebc16129ef2618f41762276cf5e..f738d50cc17d3fcaa0b9b7cf681b70dd2646897d 100644 (file)
@@ -56,7 +56,14 @@ struct gpiod_lookup_table {
        .flags = _flags,                                                  \
 }
 
+#ifdef CONFIG_GPIOLIB
 void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
 void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
+#else
+static inline
+void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
+static inline
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
+#endif
 
 #endif /* __LINUX_GPIO_MACHINE_H */
index 36872fbb815d72203e14582e3dab6ba5051ee8a7..734377ad42e9f0e4719edc4750b57245fa2d3f17 100644 (file)
@@ -64,13 +64,17 @@ extern int register_refined_jiffies(long clock_tick_rate);
 /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
 #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
 
+#ifndef __jiffy_arch_data
+#define __jiffy_arch_data
+#endif
+
 /*
  * The 64-bit value is not atomic - you MUST NOT read it
  * without sampling the sequence number in jiffies_lock.
  * get_jiffies_64() will do this for you as appropriate.
  */
 extern u64 __cacheline_aligned_in_smp jiffies_64;
-extern unsigned long volatile __cacheline_aligned_in_smp jiffies;
+extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
 
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void);
index 4ce24a3762627be20e805da3eaab26ba1bc47578..8098695e5d8d9dfba3815fd359823f92833a5d61 100644 (file)
@@ -425,12 +425,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
 }
 #endif
 
+extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+               phys_addr_t end_addr);
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
        return 0;
 }
 
+static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+               phys_addr_t end_addr)
+{
+       return 0;
+}
+
 #endif /* CONFIG_HAVE_MEMBLOCK */
 
 #endif /* __KERNEL__ */
index 7cb17c6b97de38b1e8d55ca6d8c90b8415a9c3fa..b892e95d4929d311b51877dfb9eb3de67780bbdf 100644 (file)
@@ -2327,6 +2327,17 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_REMOTE    0x2000  /* we are working on non-current tsk/mm */
 #define FOLL_COW       0x4000  /* internal GUP flag */
 
+static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
+{
+       if (vm_fault & VM_FAULT_OOM)
+               return -ENOMEM;
+       if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+               return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
+       if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
+               return -EFAULT;
+       return 0;
+}
+
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
index ebaccd4e7d8cdc5f5ef13fed1475a8b202d93628..ef6a13b7bd3e851385bea32434e207a5cf6eec7f 100644 (file)
@@ -678,6 +678,7 @@ typedef struct pglist_data {
         * is the first PFN that needs to be initialised.
         */
        unsigned long first_deferred_pfn;
+       unsigned long static_init_size;
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
index 566fda587fcf7a76af1c6a01e84ce33336606f3f..3f74ef2281e8afac1e4667b4fbf4abcc5f89ff17 100644 (file)
@@ -467,6 +467,7 @@ enum dmi_field {
        DMI_PRODUCT_VERSION,
        DMI_PRODUCT_SERIAL,
        DMI_PRODUCT_UUID,
+       DMI_PRODUCT_FAMILY,
        DMI_BOARD_VENDOR,
        DMI_BOARD_NAME,
        DMI_BOARD_VERSION,
index 279e3c5326e3a4e65bf6f86556ea984a69d0ca9c..7620eb127cffc5edbc475457732a042bac357055 100644 (file)
@@ -42,8 +42,6 @@
  * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
  *     impedance to VDD). If the argument is != 0 pull-up is enabled,
  *     if it is 0, pull-up is total, i.e. the pin is connected to VDD.
- * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous
- *     input and output operations.
  * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
  *     collector) which means it is usually wired with other output ports
  *     which are then pulled up with an external resistor. Setting this
@@ -98,7 +96,6 @@ enum pin_config_param {
        PIN_CONFIG_BIAS_PULL_DOWN,
        PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
        PIN_CONFIG_BIAS_PULL_UP,
-       PIN_CONFIG_BIDIRECTIONAL,
        PIN_CONFIG_DRIVE_OPEN_DRAIN,
        PIN_CONFIG_DRIVE_OPEN_SOURCE,
        PIN_CONFIG_DRIVE_PUSH_PULL,
index 94631026f79c56f022976a85dcde92379507e87c..11cef5a7bc87a9fe67a4bfbfca9f52e41d0abd88 100644 (file)
@@ -336,7 +336,8 @@ xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
 {
        char *cp = (char *)p;
        struct kvec *vec = &rqstp->rq_arg.head[0];
-       return cp == (char *)vec->iov_base + vec->iov_len;
+       return cp >= (char*)vec->iov_base
+               && cp <= (char*)vec->iov_base + vec->iov_len;
 }
 
 static inline int
index 275581d483ddd90d97c550ee8bf44d705833ecf8..5f17fb770477bbdfa2729a7b35cf21f70493515e 100644 (file)
@@ -557,6 +557,7 @@ struct iscsi_conn {
 #define LOGIN_FLAGS_READ_ACTIVE                1
 #define LOGIN_FLAGS_CLOSED             2
 #define LOGIN_FLAGS_READY              4
+#define LOGIN_FLAGS_INITIAL_PDU                8
        unsigned long           login_flags;
        struct delayed_work     login_work;
        struct delayed_work     login_cleanup_work;
index 0450225579367eee10de6695679fffa7b963f013..ec4565122e6553f490dfb2434b6c14102b152bb2 100644 (file)
@@ -10,6 +10,7 @@ config LIVEPATCH
        depends on SYSFS
        depends on KALLSYMS_ALL
        depends on HAVE_LIVEPATCH
+       depends on !TRIM_UNUSED_KSYMS
        help
          Say Y here if you want to support kernel live patching.
          This option has no runtime impact until a kernel "patch"
index d9e6fddcc51f06a1286c56a24c510c1a3efa8add..b3c7214d710d5ea8bab8648b5182c53d882f3c31 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -407,12 +407,10 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 
        ret = handle_mm_fault(vma, address, fault_flags);
        if (ret & VM_FAULT_ERROR) {
-               if (ret & VM_FAULT_OOM)
-                       return -ENOMEM;
-               if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
-                       return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
-               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
-                       return -EFAULT;
+               int err = vm_fault_to_errno(ret, *flags);
+
+               if (err)
+                       return err;
                BUG();
        }
 
@@ -723,12 +721,10 @@ retry:
        ret = handle_mm_fault(vma, address, fault_flags);
        major |= ret & VM_FAULT_MAJOR;
        if (ret & VM_FAULT_ERROR) {
-               if (ret & VM_FAULT_OOM)
-                       return -ENOMEM;
-               if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
-                       return -EHWPOISON;
-               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
-                       return -EFAULT;
+               int err = vm_fault_to_errno(ret, 0);
+
+               if (err)
+                       return err;
                BUG();
        }
 
index e5828875f7bbd7a770d5c23334a0e3994ffe544f..3eedb187e5496f36f7f3186267f475254bcda5ab 100644 (file)
@@ -4170,6 +4170,11 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        }
                        ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
                        if (ret & VM_FAULT_ERROR) {
+                               int err = vm_fault_to_errno(ret, flags);
+
+                               if (err)
+                                       return err;
+
                                remainder = 0;
                                break;
                        }
index d9fc0e4561283d9a351f6dd7c4cfb74ad26ab566..216184af0e192b5405efc5a594129fa7c53ae953 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1028,8 +1028,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
                goto out;
 
        if (PageTransCompound(page)) {
-               err = split_huge_page(page);
-               if (err)
+               if (split_huge_page(page))
                        goto out_unlock;
        }
 
index b049c9b2dba8718a6f57777591c2f2753d8d40ad..7b8a5db76a2fec7331f09048f3c19ebdfddf9f16 100644 (file)
@@ -1739,6 +1739,29 @@ static void __init_memblock memblock_dump(struct memblock_type *type)
        }
 }
 
+extern unsigned long __init_memblock
+memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
+{
+       struct memblock_region *rgn;
+       unsigned long size = 0;
+       int idx;
+
+       for_each_memblock_type((&memblock.reserved), rgn) {
+               phys_addr_t start, end;
+
+               if (rgn->base + rgn->size < start_addr)
+                       continue;
+               if (rgn->base > end_addr)
+                       continue;
+
+               start = rgn->base;
+               end = start + rgn->size;
+               size += end - start;
+       }
+
+       return size;
+}
+
 void __init_memblock __memblock_dump_all(void)
 {
        pr_info("MEMBLOCK configuration:\n");
index 2527dfeddb003d245ac2e2bd964134030426f777..342fac9ba89b0da3e207b1fdaef2be71c9837a24 100644 (file)
@@ -1595,12 +1595,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
        if (ret) {
                pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
                        pfn, ret, page->flags, &page->flags);
-               /*
-                * We know that soft_offline_huge_page() tries to migrate
-                * only one hugepage pointed to by hpage, so we need not
-                * run through the pagelist here.
-                */
-               putback_active_hugepage(hpage);
+               if (!list_empty(&pagelist))
+                       putback_movable_pages(&pagelist);
                if (ret > 0)
                        ret = -EIO;
        } else {
index 6ff5d729ded0ecd3a5607d10248697a786091f7e..2e65df1831d941dcd1282c56312bdbd153df0a79 100644 (file)
@@ -3029,6 +3029,17 @@ static int __do_fault(struct vm_fault *vmf)
        return ret;
 }
 
+/*
+ * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
+ * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
+ * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
+ * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
+ */
+static int pmd_devmap_trans_unstable(pmd_t *pmd)
+{
+       return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
+}
+
 static int pte_alloc_one_map(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
@@ -3052,18 +3063,27 @@ static int pte_alloc_one_map(struct vm_fault *vmf)
 map_pte:
        /*
         * If a huge pmd materialized under us just retry later.  Use
-        * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
-        * didn't become pmd_trans_huge under us and then back to pmd_none, as
-        * a result of MADV_DONTNEED running immediately after a huge pmd fault
-        * in a different thread of this mm, in turn leading to a misleading
-        * pmd_trans_huge() retval.  All we have to ensure is that it is a
-        * regular pmd that we can walk with pte_offset_map() and we can do that
-        * through an atomic read in C, which is what pmd_trans_unstable()
-        * provides.
+        * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
+        * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
+        * under us and then back to pmd_none, as a result of MADV_DONTNEED
+        * running immediately after a huge pmd fault in a different thread of
+        * this mm, in turn leading to a misleading pmd_trans_huge() retval.
+        * All we have to ensure is that it is a regular pmd that we can walk
+        * with pte_offset_map() and we can do that through an atomic read in
+        * C, which is what pmd_trans_unstable() provides.
         */
-       if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
+       if (pmd_devmap_trans_unstable(vmf->pmd))
                return VM_FAULT_NOPAGE;
 
+       /*
+        * At this point we know that our vmf->pmd points to a page of ptes
+        * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
+        * for the duration of the fault.  If a racing MADV_DONTNEED runs and
+        * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
+        * be valid and we will re-check to make sure the vmf->pte isn't
+        * pte_none() under vmf->ptl protection when we return to
+        * alloc_set_pte().
+        */
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                        &vmf->ptl);
        return 0;
@@ -3690,7 +3710,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
                vmf->pte = NULL;
        } else {
                /* See comment in pte_alloc_one_map() */
-               if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
+               if (pmd_devmap_trans_unstable(vmf->pmd))
                        return 0;
                /*
                 * A regular pmd is established and it can't morph into a huge
index c483c5c20b4bd12bcca50972c9f74a0dbd3a713e..b562b5523a6544e6c0ae6e4f792943441f6217a3 100644 (file)
@@ -284,7 +284,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 {
        int i;
        int nr = pagevec_count(pvec);
-       int delta_munlocked;
+       int delta_munlocked = -nr;
        struct pagevec pvec_putback;
        int pgrescued = 0;
 
@@ -304,6 +304,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
                                continue;
                        else
                                __munlock_isolation_failed(page);
+               } else {
+                       delta_munlocked++;
                }
 
                /*
@@ -315,7 +317,6 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
                pagevec_add(&pvec_putback, pvec->pages[i]);
                pvec->pages[i] = NULL;
        }
-       delta_munlocked = -nr + pagevec_count(&pvec_putback);
        __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
        spin_unlock_irq(zone_lru_lock(zone));
 
index f9e450c6b6e414d61b00d5a61be9cdea3b773e1b..2302f250d6b1ba150e3c2e4e17cfb6c99574ab5b 100644 (file)
@@ -292,6 +292,26 @@ int page_group_by_mobility_disabled __read_mostly;
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 static inline void reset_deferred_meminit(pg_data_t *pgdat)
 {
+       unsigned long max_initialise;
+       unsigned long reserved_lowmem;
+
+       /*
+        * Initialise at least 2G of a node but also take into account that
+        * two large system hashes that can take up 1GB for 0.25TB/node.
+        */
+       max_initialise = max(2UL << (30 - PAGE_SHIFT),
+               (pgdat->node_spanned_pages >> 8));
+
+       /*
+        * Compensate for all the memblock reservations (e.g. crash kernel)
+        * from the initial estimation to make sure we will initialize enough
+        * memory to boot.
+        */
+       reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+                       pgdat->node_start_pfn + max_initialise);
+       max_initialise += reserved_lowmem;
+
+       pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
        pgdat->first_deferred_pfn = ULONG_MAX;
 }
 
@@ -314,20 +334,11 @@ static inline bool update_defer_init(pg_data_t *pgdat,
                                unsigned long pfn, unsigned long zone_end,
                                unsigned long *nr_initialised)
 {
-       unsigned long max_initialise;
-
        /* Always populate low zones for address-contrained allocations */
        if (zone_end < pgdat_end_pfn(pgdat))
                return true;
-       /*
-        * Initialise at least 2G of a node but also take into account that
-        * two large system hashes that can take up 1GB for 0.25TB/node.
-        */
-       max_initialise = max(2UL << (30 - PAGE_SHIFT),
-               (pgdat->node_spanned_pages >> 8));
-
        (*nr_initialised)++;
-       if ((*nr_initialised > max_initialise) &&
+       if ((*nr_initialised > pgdat->static_init_size) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
                pgdat->first_deferred_pfn = pfn;
                return false;
@@ -3870,7 +3881,9 @@ retry:
                goto got_pg;
 
        /* Avoid allocations with no watermarks from looping endlessly */
-       if (test_thread_flag(TIF_MEMDIE))
+       if (test_thread_flag(TIF_MEMDIE) &&
+           (alloc_flags == ALLOC_NO_WATERMARKS ||
+            (gfp_mask & __GFP_NOMEMALLOC)))
                goto nopage;
 
        /* Retry as long as the OOM killer is making progress */
@@ -6136,7 +6149,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
        /* pg_data_t should be reset to zero when it's allocated */
        WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
 
-       reset_deferred_meminit(pgdat);
        pgdat->node_id = nid;
        pgdat->node_start_pfn = node_start_pfn;
        pgdat->per_cpu_nodestats = NULL;
@@ -6158,6 +6170,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
                (unsigned long)pgdat->node_mem_map);
 #endif
 
+       reset_deferred_meminit(pgdat);
        free_area_init_core(pgdat);
 }
 
index 57e5156f02be6bcc23e70ec801e9cc1c3bbdd631..7449593fca724147cef5b8f7a46752333e5e0585 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5512,6 +5512,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
                char mbuf[64];
                char *buf;
                struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
+               ssize_t len;
 
                if (!attr || !attr->store || !attr->show)
                        continue;
@@ -5536,8 +5537,9 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
                        buf = buffer;
                }
 
-               attr->show(root_cache, buf);
-               attr->store(s, buf, strlen(buf));
+               len = attr->show(root_cache, buf);
+               if (len > 0)
+                       attr->store(s, buf, len);
        }
 
        if (buffer)
index 464df34899031d46058b7cbadc1c3be24ae8dc89..26be6407abd7efe452a585d341a8d7e5b53d2b32 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -357,8 +357,11 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
        WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
 
        /*
-        * Make sure that larger requests are not too disruptive - no OOM
-        * killer and no allocation failure warnings as we have a fallback
+        * We want to attempt a large physically contiguous block first because
+        * it is less likely to fragment multiple larger blocks and therefore
+        * contribute to a long term fragmentation less than vmalloc fallback.
+        * However make sure that larger requests are not too disruptive - no
+        * OOM killer and no allocation failure warnings as we have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;
index f9b92ece78343a463a37d33e3f73fa8621b84444..5afd1098e33a173a18b2310aa24d205534df711c 100644 (file)
@@ -23,10 +23,11 @@ class LxDmesg(gdb.Command):
         super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)
 
     def invoke(self, arg, from_tty):
-        log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16)
-        log_first_idx = int(gdb.parse_and_eval("log_first_idx"))
-        log_next_idx = int(gdb.parse_and_eval("log_next_idx"))
-        log_buf_len = int(gdb.parse_and_eval("log_buf_len"))
+        log_buf_addr = int(str(gdb.parse_and_eval(
+            "'printk.c'::log_buf")).split()[0], 16)
+        log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx"))
+        log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx"))
+        log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len"))
 
         inf = gdb.inferiors()[0]
         start = log_buf_addr + log_first_idx
index 918e45268915de1c64e5b8b783ba423488e8f319..a57988d617e934847bff6b56f08b64b813071875 100644 (file)
@@ -2324,11 +2324,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
 
        SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
-       SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
-       SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
        SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+       SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
        SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+       SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
index dc48eedea92e7aaaba64f4db4053fea1b73d0b77..26ed23b18b7774fd495f7ce90e51ec6dc8022e2d 100644 (file)
@@ -698,16 +698,18 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol,
        struct snd_usb_audio *chip = elem->head.mixer->chip;
        struct snd_us16x08_meter_store *store = elem->private_data;
        u8 meter_urb[64];
-       char tmp[sizeof(mix_init_msg2)] = {0};
 
        switch (kcontrol->private_value) {
-       case 0:
-               snd_us16x08_send_urb(chip, (char *)mix_init_msg1,
-                                    sizeof(mix_init_msg1));
+       case 0: {
+               char tmp[sizeof(mix_init_msg1)];
+
+               memcpy(tmp, mix_init_msg1, sizeof(mix_init_msg1));
+               snd_us16x08_send_urb(chip, tmp, 4);
                snd_us16x08_recv_urb(chip, meter_urb,
                        sizeof(meter_urb));
                kcontrol->private_value++;
                break;
+       }
        case 1:
                snd_us16x08_recv_urb(chip, meter_urb,
                        sizeof(meter_urb));
@@ -718,15 +720,18 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol,
                        sizeof(meter_urb));
                kcontrol->private_value++;
                break;
-       case 3:
+       case 3: {
+               char tmp[sizeof(mix_init_msg2)];
+
                memcpy(tmp, mix_init_msg2, sizeof(mix_init_msg2));
                tmp[2] = snd_get_meter_comp_index(store);
-               snd_us16x08_send_urb(chip, tmp, sizeof(mix_init_msg2));
+               snd_us16x08_send_urb(chip, tmp, 10);
                snd_us16x08_recv_urb(chip, meter_urb,
                        sizeof(meter_urb));
                kcontrol->private_value = 0;
                break;
        }
+       }
 
        for (set = 0; set < 6; set++)
                get_meter_levels_from_urb(set, store, meter_urb);
@@ -1135,7 +1140,7 @@ static const struct snd_us16x08_control_params eq_controls[] = {
                .control_id = SND_US16X08_ID_EQLOWMIDWIDTH,
                .type = USB_MIXER_U8,
                .num_channels = 16,
-               .name = "EQ MidQLow Q",
+               .name = "EQ MidLow Q",
        },
        { /* EQ mid high gain */
                .kcontrol_new = &snd_us16x08_eq_gain_ctl,
index c0c48507e44e2992b3624b2374cd08769a8bb607..ad0543e21760562d94bc0a44676700b1179c1787 100644 (file)
@@ -220,6 +220,7 @@ config INITRAMFS_COMPRESSION_LZ4
 endchoice
 
 config INITRAMFS_COMPRESSION
+       depends on INITRAMFS_SOURCE!=""
        string
        default ""      if INITRAMFS_COMPRESSION_NONE
        default ".gz"   if INITRAMFS_COMPRESSION_GZIP