]> git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge branches 'pm-cpufreq', 'intel_pstate' and 'pm-cpuidle'
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 3 Jul 2017 12:21:18 +0000 (14:21 +0200)
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 3 Jul 2017 12:21:18 +0000 (14:21 +0200)
* pm-cpufreq:
  cpufreq / CPPC: Initialize policy->min to lowest nonlinear performance
  cpufreq: sfi: make freq_table static
  cpufreq: exynos5440: Fix inconsistent indenting
  cpufreq: imx6q: imx6ull should use the same flow as imx6ul
  cpufreq: dt: Add support for hi3660

* intel_pstate:
  cpufreq: Update scaling_cur_freq documentation
  cpufreq: intel_pstate: Clean up after performance governor changes
  intel_pstate: skip scheduler hook when in "performance" mode
  intel_pstate: delete scheduler hook in HWP mode
  x86: use common aperfmperf_khz_on_cpu() to calculate KHz using APERF/MPERF
  cpufreq: intel_pstate: Remove max/min fractions to limit performance
  x86: do not use cpufreq_quick_get() for /proc/cpuinfo "cpu MHz"

* pm-cpuidle:
  cpuidle: menu: allow state 0 to be disabled
  intel_idle: Use more common logging style
  x86/ACPI/cstate: Allow ACPI C1 FFH MWAIT use on AMD systems
  ARM: cpuidle: Support asymmetric idle definition

254 files changed:
Documentation/devicetree/bindings/clock/sunxi-ccu.txt
Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
Documentation/devicetree/bindings/mfd/stm32-timers.txt
Documentation/devicetree/bindings/net/dsa/b53.txt
Documentation/devicetree/bindings/net/smsc911x.txt
MAINTAINERS
Makefile
arch/arm64/kernel/vdso.c
arch/arm64/kernel/vdso/gettimeofday.S
arch/mips/kvm/tlb.c
arch/powerpc/include/asm/kprobes.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/trace/ftrace_64_mprofile.S
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_interrupts.S
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/perf/perf_regs.c
arch/powerpc/platforms/powernv/npu-dma.c
arch/s390/include/asm/sysinfo.h
arch/s390/kernel/sysinfo.c
arch/s390/kvm/gaccess.c
arch/x86/events/intel/core.c
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/msr-index.h
arch/x86/kernel/acpi/cstate.c
arch/x86/kvm/emulate.c
arch/x86/kvm/x86.c
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.c
block/partitions/ldm.c
block/partitions/ldm.h
drivers/acpi/acpi_extlog.c
drivers/acpi/apei/ghes.c
drivers/acpi/bus.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/nfit.h
drivers/acpi/scan.c
drivers/acpi/utils.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkback/xenbus.c
drivers/char/random.c
drivers/char/tpm/tpm_crb.c
drivers/char/tpm/tpm_ppi.c
drivers/clk/meson/Kconfig
drivers/clk/sunxi-ng/Kconfig
drivers/clk/sunxi-ng/ccu-sun50i-a64.h
drivers/clk/sunxi-ng/ccu-sun5i.c
drivers/clk/sunxi-ng/ccu-sun6i-a31.c
drivers/clk/sunxi-ng/ccu-sun8i-h3.h
drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/cadence_ttc_timer.c
drivers/clocksource/timer-sun5i.c
drivers/cpufreq/cppc_cpufreq.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/exynos5440-cpufreq.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/sfi-cpufreq.c
drivers/cpuidle/Kconfig.arm
drivers/cpuidle/cpuidle-arm.c
drivers/cpuidle/governors/menu.c
drivers/gpio/gpio-mvebu.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_guc_submission.c
drivers/gpu/drm/i915/intel_acpi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp_aux_backlight.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-quirks.c
drivers/i2c/busses/i2c-imx.c
drivers/idle/intel_idle.c
drivers/input/misc/soc_button_array.c
drivers/input/rmi4/rmi_f54.c
drivers/input/serio/i8042-x86ia64io.h
drivers/iommu/dmar.c
drivers/irqchip/irq-mips-gic.c
drivers/md/dm-integrity.c
drivers/md/dm-io.c
drivers/md/dm-raid1.c
drivers/md/md.c
drivers/mfd/arizona-core.c
drivers/mmc/host/sdhci-pci-core.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fman/mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/sfc/ef10_sriov.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
drivers/ntb/hw/intel/ntb_hw_intel.c
drivers/ntb/ntb_transport.c
drivers/ntb/test/ntb_perf.c
drivers/nvdimm/btt_devs.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/target/nvmet.h
drivers/pci/pci-acpi.c
drivers/pci/pci-label.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/stm32/pinctrl-stm32.c
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/scsi_debug.c
drivers/target/iscsi/iscsi_target.c
drivers/target/target_core_internal.h
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/thermal/int340x_thermal/int3400_thermal.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/host/xhci-pci.c
drivers/usb/misc/ucsi.c
drivers/usb/typec/typec_wcove.c
drivers/xen/tmem.c
fs/afs/cmservice.c
fs/afs/internal.h
fs/afs/main.c
fs/autofs4/dev-ioctl.c
fs/cifs/file.c
fs/cifs/misc.c
fs/cifs/smb1ops.c
fs/cifs/smb2ops.c
fs/cifs/xattr.c
fs/dax.c
fs/exec.c
fs/ext4/super.c
fs/f2fs/super.c
fs/gfs2/ops_fstype.c
fs/gfs2/sys.c
fs/nfsd/export.c
fs/ocfs2/dlmglue.c
fs/ocfs2/super.c
fs/ocfs2/xattr.c
fs/overlayfs/copy_up.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/ufs/balloc.c
fs/ufs/inode.c
fs/ufs/super.c
fs/ufs/ufs_fs.h
fs/xfs/Makefile
fs/xfs/uuid.c [deleted file]
fs/xfs/uuid.h [deleted file]
fs/xfs/xfs_aops.c
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_linux.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.c
include/acpi/acpi_bus.h
include/dt-bindings/clock/sun50i-a64-ccu.h
include/dt-bindings/clock/sun8i-h3-ccu.h
include/linux/acpi.h
include/linux/blkdev.h
include/linux/cleancache.h
include/linux/fs.h
include/linux/genhd.h
include/linux/nvme-fc.h
include/linux/nvme.h
include/linux/pci-acpi.h
include/linux/slub_def.h
include/linux/timekeeper_internal.h
include/linux/uuid.h
include/net/wext.h
include/uapi/linux/uuid.h
kernel/events/ring_buffer.c
kernel/livepatch/patch.c
kernel/livepatch/transition.c
kernel/signal.c
kernel/sysctl_binary.c
kernel/time/timekeeping.c
lib/cmdline.c
lib/test_uuid.c
lib/uuid.c
lib/vsprintf.c
mm/cleancache.c
mm/khugepaged.c
mm/mmap.c
mm/shmem.c
mm/slub.c
mm/vmalloc.c
net/8021q/vlan.c
net/core/dev.c
net/core/dev_ioctl.c
net/core/fib_rules.c
net/core/rtnetlink.c
net/decnet/dn_route.c
net/ipv4/igmp.c
net/ipv4/ip_tunnel.c
net/ipv6/addrconf.c
net/ipv6/fib6_rules.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_tunnel.c
net/rxrpc/key.c
net/sctp/endpointola.c
net/sctp/sctp_diag.c
net/sctp/socket.c
net/wireless/wext-core.c
scripts/Makefile.headersinst
scripts/genksyms/genksyms.h
scripts/kconfig/Makefile
scripts/kconfig/nconf.c
scripts/kconfig/nconf.gui.c
scripts/tags.sh
security/integrity/evm/evm_crypto.c
security/integrity/ima/ima_policy.c
sound/core/pcm_lib.c
sound/firewire/amdtp-stream.c
sound/firewire/amdtp-stream.h
sound/pci/hda/hda_intel.c
sound/soc/intel/skylake/skl-nhlt.c
tools/perf/util/probe-event.c
tools/power/cpupower/utils/helpers/amd.c
tools/power/cpupower/utils/helpers/helpers.h
tools/power/cpupower/utils/helpers/misc.c
tools/power/x86/turbostat/turbostat.c
tools/power/x86/x86_energy_perf_policy/Makefile
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
tools/testing/nvdimm/test/iomap.c
tools/testing/nvdimm/test/nfit.c
tools/testing/nvdimm/test/nfit_test.h
tools/testing/selftests/ntb/ntb_test.sh

index e9c5a1d9834af600de55821bc0077e6235ff0bff..f465647a4dd219a7718c26c88725a42dc5106f0d 100644 (file)
@@ -22,7 +22,8 @@ Required properties :
 - #clock-cells : must contain 1
 - #reset-cells : must contain 1
 
-For the PRCM CCUs on H3/A64, one more clock is needed:
+For the PRCM CCUs on H3/A64, two more clocks are needed:
+- "pll-periph": the SoC's peripheral PLL from the main CCU
 - "iosc": the SoC's internal frequency oscillator
 
 Example for generic CCU:
@@ -39,8 +40,8 @@ Example for PRCM CCU:
 r_ccu: clock@01f01400 {
        compatible = "allwinner,sun50i-a64-r-ccu";
        reg = <0x01f01400 0x100>;
-       clocks = <&osc24M>, <&osc32k>, <&iosc>;
-       clock-names = "hosc", "losc", "iosc";
+       clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>;
+       clock-names = "hosc", "losc", "iosc", "pll-periph";
        #clock-cells = <1>;
        #reset-cells = <1>;
 };
index 42c3bb2d53e88b651a7d39efe00e278c24d9c9b3..01e331a5f3e7491fba25188b5a12e91722057e42 100644 (file)
@@ -41,9 +41,9 @@ Required properties:
 Optional properties:
 
 In order to use the GPIO lines in PWM mode, some additional optional
-properties are required. Only Armada 370 and XP support these properties.
+properties are required.
 
-- compatible: Must contain "marvell,armada-370-xp-gpio"
+- compatible: Must contain "marvell,armada-370-gpio"
 
 - reg: an additional register set is needed, for the GPIO Blink
   Counter on/off registers.
@@ -71,7 +71,7 @@ Example:
                };
 
                gpio1: gpio@18140 {
-                       compatible = "marvell,armada-370-xp-gpio";
+                       compatible = "marvell,armada-370-gpio";
                        reg = <0x18140 0x40>, <0x181c8 0x08>;
                        reg-names = "gpio", "pwm";
                        ngpios = <17>;
index bbd083f5600a786b23a0ec821f384e8e8a6a3553..1db6e0057a638e09a5346956a70620c31276fdf6 100644 (file)
@@ -31,7 +31,7 @@ Example:
                compatible = "st,stm32-timers";
                reg = <0x40010000 0x400>;
                clocks = <&rcc 0 160>;
-               clock-names = "clk_int";
+               clock-names = "int";
 
                pwm {
                        compatible = "st,stm32-pwm";
index d6c6e41648d4f17814ea43775e8591a40a4c0786..8ec2ca21adeb6ca840a7fd5d660a57748919af42 100644 (file)
@@ -34,7 +34,7 @@ Required properties:
       "brcm,bcm6328-switch"
       "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"
 
-See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
 required and optional properties.
 
 Examples:
index 16c3a9501f5d689431451c52b91c86b8ae1395b8..acfafc8e143c4c8599510eb7d2cbcb35af7e6f9f 100644 (file)
@@ -27,6 +27,7 @@ Optional properties:
   of the device. On many systems this is wired high so the device goes
   out of reset at power-on, but if it is under program control, this
   optional GPIO can wake up in response to it.
+- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies
 
 Examples:
 
index 09b5ab6a8a5ce8fd66bed85f30b9ca7db26c14c2..8b9b56d5806587b065fe4599e2ceec05bafe7004 100644 (file)
@@ -13462,6 +13462,17 @@ W:     http://en.wikipedia.org/wiki/Util-linux
 T:     git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
 S:     Maintained
 
+UUID HELPERS
+M:     Christoph Hellwig <hch@lst.de>
+R:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+L:     linux-kernel@vger.kernel.org
+T:     git git://git.infradead.org/users/hch/uuid.git
+F:     lib/uuid.c
+F:     lib/test_uuid.c
+F:     include/linux/uuid.h
+F:     include/uapi/linux/uuid.h
+S:     Maintained
+
 UVESAFB DRIVER
 M:     Michal Januszewski <spock@gentoo.org>
 L:     linux-fbdev@vger.kernel.org
index e40c471abe29fbc9e569bc9f4fca4302ec78cfb9..6d8a984ed9c975e8867b151e8e6e9424f85dea87 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -1437,7 +1437,7 @@ help:
        @echo  '  make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
        @echo  '  make V=2   [targets] 2 => give reason for rebuild of target'
        @echo  '  make O=dir [targets] Locate all output files in "dir", including .config'
-       @echo  '  make C=1   [targets] Check all c source with $$CHECK (sparse by default)'
+       @echo  '  make C=1   [targets] Check re-compiled c source with $$CHECK (sparse by default)'
        @echo  '  make C=2   [targets] Force check of all c source with $$CHECK'
        @echo  '  make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
        @echo  '  make W=n   [targets] Enable extra gcc checks, n=1,2,3 where'
index 41b6e31f8f556f4d4af8ecdafd3867e540a8ed10..d0cb007fa4823791be514b8a5ac65d8d408f099d 100644 (file)
@@ -221,10 +221,11 @@ void update_vsyscall(struct timekeeper *tk)
                /* tkr_mono.cycle_last == tkr_raw.cycle_last */
                vdso_data->cs_cycle_last        = tk->tkr_mono.cycle_last;
                vdso_data->raw_time_sec         = tk->raw_time.tv_sec;
-               vdso_data->raw_time_nsec        = tk->raw_time.tv_nsec;
+               vdso_data->raw_time_nsec        = (tk->raw_time.tv_nsec <<
+                                                  tk->tkr_raw.shift) +
+                                                 tk->tkr_raw.xtime_nsec;
                vdso_data->xtime_clock_sec      = tk->xtime_sec;
                vdso_data->xtime_clock_nsec     = tk->tkr_mono.xtime_nsec;
-               /* tkr_raw.xtime_nsec == 0 */
                vdso_data->cs_mono_mult         = tk->tkr_mono.mult;
                vdso_data->cs_raw_mult          = tk->tkr_raw.mult;
                /* tkr_mono.shift == tkr_raw.shift */
index e00b4671bd7c4af5516b95da00409c7296df1963..76320e9209651fd307659dcbab8092ff7c1c09e2 100644 (file)
@@ -256,7 +256,6 @@ monotonic_raw:
        seqcnt_check fail=monotonic_raw
 
        /* All computations are done with left-shifted nsecs. */
-       lsl     x14, x14, x12
        get_nsec_per_sec res=x9
        lsl     x9, x9, x12
 
index 7c6336dd2638ce9c12c4ff8be566ff6acb856137..7cd92166a0b9a9bf3c14fe1df33442442ac668ac 100644 (file)
@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
                          bool user, bool kernel)
 {
-       int idx_user, idx_kernel;
+       /*
+        * Initialize idx_user and idx_kernel to workaround bogus
+        * maybe-initialized warning when using GCC 6.
+        */
+       int idx_user = 0, idx_kernel = 0;
        unsigned long flags, old_entryhi;
 
        local_irq_save(flags);
index a83821f33ea36f8c005dab8e80dfdfcfd26a7058..8814a7249cebe29852dd8b4588a61052decac88c 100644 (file)
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
                           struct kprobe_ctlblk *kcb);
index ae418b85c17c4bce805227a82350d322259cf06e..b886795060fd2dba727c54d8c7b5e2b47a888f8d 100644 (file)
@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION()
        .balign IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
-       andis.  r0,r4,0xa410            /* weird error? */
+       andis.  r0,r4,0xa450            /* weird error? */
        bne-    handle_page_fault       /* if not, try to insert a HPTE */
-       andis.  r0,r4,DSISR_DABRMATCH@h
-       bne-    handle_dabr_fault
        CURRENT_THREAD_INFO(r11, r1)
        lwz     r0,TI_PREEMPT(r11)      /* If we're in an "NMI" */
        andis.  r0,r0,NMI_MASK@h        /* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@ do_hash_page:
 
        /* Error */
        blt-    13f
+
+       /* Reload DSISR into r4 for the DABR check below */
+       ld      r4,_DSISR(r1)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-11:    ld      r4,_DAR(r1)
+11:    andis.  r0,r4,DSISR_DABRMATCH@h
+       bne-    handle_dabr_fault
+       ld      r4,_DAR(r1)
        ld      r5,_DSISR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_page_fault
index fc4343514bed8b0f05a88e64caf0285c44ee8ea0..01addfb0ed0a42216d64c7692c3fea4694c528fb 100644 (file)
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
 
+int is_current_kprobe_addr(unsigned long addr)
+{
+       struct kprobe *p = kprobe_running();
+       return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
        return  (addr >= (unsigned long)__kprobes_text_start &&
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
        regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
 #endif
 
+       /*
+        * jprobes use jprobe_return() which skips the normal return
+        * path of the function, and this messes up the accounting of the
+        * function graph tracer.
+        *
+        * Pause function graph tracing while performing the jprobe function.
+        */
+       pause_graph_tracing();
+
        return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
         * saved regs...
         */
        memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+       /* It's OK to start function graph tracing again */
+       unpause_graph_tracing();
        preempt_enable_no_resched();
        return 1;
 }
index a8c1f99e96072530cb1f2d9ed702dffd78665720..4640f6d64f8b406a636d60c4ea2263658dde5be6 100644 (file)
@@ -615,6 +615,24 @@ void __init exc_lvl_early_init(void)
 }
 #endif
 
+/*
+ * Emergency stacks are used for a range of things, from asynchronous
+ * NMIs (system reset, machine check) to synchronous, process context.
+ * We set preempt_count to zero, even though that isn't necessarily correct. To
+ * get the right value we'd need to copy it from the previous thread_info, but
+ * doing that might fault causing more problems.
+ * TODO: what to do with accounting?
+ */
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+       ti->task = NULL;
+       ti->cpu = cpu;
+       ti->preempt_count = 0;
+       ti->local_flags = 0;
+       ti->flags = 0;
+       klp_init_thread_info(ti);
+}
+
 /*
  * Stack space used when we detect a bad kernel stack pointer, and
  * early in SMP boots before relocation is enabled. Exclusive emergency
@@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
         * Since we use these as temporary stacks during secondary CPU
         * bringup, we need to get at them in real mode. This means they
         * must also be within the RMO region.
+        *
+        * The IRQ stacks allocated elsewhere in this file are zeroed and
+        * initialized in kernel/irq.c. These are initialized here in order
+        * to have emergency stacks available as early as possible.
         */
        limit = min(safe_stack_limit(), ppc64_rma_size);
 
        for_each_possible_cpu(i) {
                struct thread_info *ti;
                ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-               klp_init_thread_info(ti);
+               memset(ti, 0, THREAD_SIZE);
+               emerg_stack_init_thread_info(ti, i);
                paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
                /* emergency stack for NMI exception handling. */
                ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-               klp_init_thread_info(ti);
+               memset(ti, 0, THREAD_SIZE);
+               emerg_stack_init_thread_info(ti, i);
                paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 
                /* emergency stack for machine check exception handling. */
                ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-               klp_init_thread_info(ti);
+               memset(ti, 0, THREAD_SIZE);
+               emerg_stack_init_thread_info(ti, i);
                paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
        }
index 7c933a99f5d578bdfd1408b16d4e7fe33e95e4a9..c98e90b4ea7b1f15a2dd7157300376e370774cf2 100644 (file)
@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
        stdu    r1,-SWITCH_FRAME_SIZE(r1)
 
        /* Save all gprs to pt_regs */
-       SAVE_8GPRS(0,r1)
-       SAVE_8GPRS(8,r1)
-       SAVE_8GPRS(16,r1)
-       SAVE_8GPRS(24,r1)
+       SAVE_GPR(0, r1)
+       SAVE_10GPRS(2, r1)
+       SAVE_10GPRS(12, r1)
+       SAVE_10GPRS(22, r1)
+
+       /* Save previous stack pointer (r1) */
+       addi    r8, r1, SWITCH_FRAME_SIZE
+       std     r8, GPR1(r1)
 
        /* Load special regs for save below */
        mfmsr   r8
@@ -95,18 +99,44 @@ ftrace_call:
        bl      ftrace_stub
        nop
 
-       /* Load ctr with the possibly modified NIP */
-       ld      r3, _NIP(r1)
-       mtctr   r3
+       /* Load the possibly modified NIP */
+       ld      r15, _NIP(r1)
+
 #ifdef CONFIG_LIVEPATCH
-       cmpd    r14,r3          /* has NIP been altered? */
+       cmpd    r14, r15        /* has NIP been altered? */
+#endif
+
+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+       /* NIP has not been altered, skip over further checks */
+       beq     1f
+
+       /* Check if there is an active kprobe on us */
+       subi    r3, r14, 4
+       bl      is_current_kprobe_addr
+       nop
+
+       /*
+        * If r3 == 1, then this is a kprobe/jprobe.
+        * else, this is livepatched function.
+        *
+        * The conditional branch for livepatch_handler below will use the
+        * result of this comparison. For kprobe/jprobe, we just need to branch to
+        * the new NIP, not call livepatch_handler. The branch below is bne, so we
+        * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+        * CR0[EQ] = (r3 == 1).
+        */
+       cmpdi   r3, 1
+1:
 #endif
 
+       /* Load CTR with the possibly modified NIP */
+       mtctr   r15
+
        /* Restore gprs */
-       REST_8GPRS(0,r1)
-       REST_8GPRS(8,r1)
-       REST_8GPRS(16,r1)
-       REST_8GPRS(24,r1)
+       REST_GPR(0,r1)
+       REST_10GPRS(2,r1)
+       REST_10GPRS(12,r1)
+       REST_10GPRS(22,r1)
 
        /* Restore possibly modified LR */
        ld      r0, _LINK(r1)
@@ -119,7 +149,10 @@ ftrace_call:
        addi r1, r1, SWITCH_FRAME_SIZE
 
 #ifdef CONFIG_LIVEPATCH
-        /* Based on the cmpd above, if the NIP was altered handle livepatch */
+        /*
+        * Based on the cmpd or cmpdi above, if the NIP was altered and we're
+        * not on a kprobe/jprobe, then handle livepatch.
+        */
        bne-    livepatch_handler
 #endif
 
index 42b7a4fd57d9a557f8278a9f9a8c228f2758a1e8..8d1a365b8edc45fa9f655b77789a0d8602b462fd 100644 (file)
@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
                break;
        case KVM_REG_PPC_TB_OFFSET:
+               /*
+                * POWER9 DD1 has an erratum where writing TBU40 causes
+                * the timebase to lose ticks.  So we don't let the
+                * timebase offset be changed on P9 DD1.  (It is
+                * initialized to zero.)
+                */
+               if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+                       break;
                /* round up to multiple of 2^24 */
                vcpu->arch.vcore->tb_offset =
                        ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
        int r;
        int srcu_idx;
+       unsigned long ebb_regs[3] = {}; /* shut up GCC */
+       unsigned long user_tar = 0;
+       unsigned int user_vrsave;
 
        if (!vcpu->arch.sane) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return -EINVAL;
        }
 
+       /*
+        * Don't allow entry with a suspended transaction, because
+        * the guest entry/exit code will lose it.
+        * If the guest has TM enabled, save away their TM-related SPRs
+        * (they will get restored by the TM unavailable interrupt).
+        */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+           (current->thread.regs->msr & MSR_TM)) {
+               if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+                       run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+                       run->fail_entry.hardware_entry_failure_reason = 0;
+                       return -EINVAL;
+               }
+               current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+               current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+               current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+               current->thread.regs->msr &= ~MSR_TM;
+       }
+#endif
+
        kvmppc_core_prepare_to_enter(vcpu);
 
        /* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
        flush_all_to_thread(current);
 
+       /* Save userspace EBB and other register values */
+       if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+               ebb_regs[0] = mfspr(SPRN_EBBHR);
+               ebb_regs[1] = mfspr(SPRN_EBBRR);
+               ebb_regs[2] = mfspr(SPRN_BESCR);
+               user_tar = mfspr(SPRN_TAR);
+       }
+       user_vrsave = mfspr(SPRN_VRSAVE);
+
        vcpu->arch.wqp = &vcpu->arch.vcore->wq;
        vcpu->arch.pgdir = current->mm->pgd;
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
                }
        } while (is_kvmppc_resume_guest(r));
 
+       /* Restore userspace EBB and other register values */
+       if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+               mtspr(SPRN_EBBHR, ebb_regs[0]);
+               mtspr(SPRN_EBBRR, ebb_regs[1]);
+               mtspr(SPRN_BESCR, ebb_regs[2]);
+               mtspr(SPRN_TAR, user_tar);
+               mtspr(SPRN_FSCR, current->thread.fscr);
+       }
+       mtspr(SPRN_VRSAVE, user_vrsave);
+
  out:
        vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
        atomic_dec(&vcpu->kvm->arch.vcpus_running);
index 0fdc4a28970b3c53d821088dae8eec5e2282dca4..404deb512844424d07bba8ead5bd77e70aee82af 100644 (file)
@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
         * Put whatever is in the decrementer into the
         * hypervisor decrementer.
         */
+BEGIN_FTR_SECTION
+       ld      r5, HSTATE_KVM_VCORE(r13)
+       ld      r6, VCORE_KVM(r5)
+       ld      r9, KVM_HOST_LPCR(r6)
+       andis.  r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
        mfspr   r8,SPRN_DEC
        mftb    r7
-       mtspr   SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+       /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+       bne     32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
        extsw   r8,r8
+32:    mtspr   SPRN_HDEC,r8
        add     r8,r8,r7
        std     r8,HSTATE_DECEXP(r13)
 
index bdb3f76ceb6b9ff0e25e5b3b56c3be48dd6f65cc..4888dd494604f101a194a51ff168c44d85c4354d 100644 (file)
 #include <asm/opal.h>
 #include <asm/xive-regs.h>
 
+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg)                       \
+BEGIN_FTR_SECTION;                             \
+       extsw   reg, reg;                       \
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE   1
 #define NAPPING_NOVCPU 2
 
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS                    144
+#define STACK_SLOT_TRAP                (SFS-4)
+#define STACK_SLOT_TID         (SFS-16)
+#define STACK_SLOT_PSSCR       (SFS-24)
+#define STACK_SLOT_PID         (SFS-32)
+#define STACK_SLOT_IAMR                (SFS-40)
+#define STACK_SLOT_CIABR       (SFS-48)
+#define STACK_SLOT_DAWR                (SFS-56)
+#define STACK_SLOT_DAWRX       (SFS-64)
+
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 kvmppc_primary_no_guest:
        /* We handle this much like a ceded vcpu */
        /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+       /* HDEC may be larger than DEC for arch >= v3.00, but since the */
+       /* HDEC value came from DEC in the first place, it will fit */
        mfspr   r3, SPRN_HDEC
        mtspr   SPRN_DEC, r3
        /*
@@ -295,8 +314,9 @@ kvm_novcpu_wakeup:
 
        /* See if our timeslice has expired (HDEC is negative) */
        mfspr   r0, SPRN_HDEC
+       EXTEND_HDEC(r0)
        li      r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-       cmpwi   r0, 0
+       cmpdi   r0, 0
        blt     kvm_novcpu_exit
 
        /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -319,10 +339,10 @@ kvm_novcpu_exit:
        bl      kvmhv_accumulate_time
 #endif
 13:    mr      r3, r12
-       stw     r12, 112-4(r1)
+       stw     r12, STACK_SLOT_TRAP(r1)
        bl      kvmhv_commence_exit
        nop
-       lwz     r12, 112-4(r1)
+       lwz     r12, STACK_SLOT_TRAP(r1)
        b       kvmhv_switch_to_host
 
 /*
@@ -390,8 +410,8 @@ kvm_secondary_got_guest:
        lbz     r4, HSTATE_PTID(r13)
        cmpwi   r4, 0
        bne     63f
-       lis     r6, 0x7fff
-       ori     r6, r6, 0xffff
+       LOAD_REG_ADDR(r6, decrementer_max)
+       ld      r6, 0(r6)
        mtspr   SPRN_HDEC, r6
        /* and set per-LPAR registers, if doing dynamic micro-threading */
        ld      r6, HSTATE_SPLIT_MODE(r13)
@@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  *                                                                            *
  *****************************************************************************/
 
-/* Stack frame offsets */
-#define STACK_SLOT_TID         (112-16)
-#define STACK_SLOT_PSSCR       (112-24)
-#define STACK_SLOT_PID         (112-32)
-
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
 
@@ -565,7 +580,7 @@ kvmppc_hv_entry:
         */
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)
-       stdu    r1, -112(r1)
+       stdu    r1, -SFS(r1)
 
        /* Save R1 in the PACA */
        std     r1, HSTATE_HOST_R1(r13)
@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
        mfspr   r5, SPRN_TIDR
        mfspr   r6, SPRN_PSSCR
        mfspr   r7, SPRN_PID
+       mfspr   r8, SPRN_IAMR
        std     r5, STACK_SLOT_TID(r1)
        std     r6, STACK_SLOT_PSSCR(r1)
        std     r7, STACK_SLOT_PID(r1)
+       std     r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+       mfspr   r5, SPRN_CIABR
+       mfspr   r6, SPRN_DAWR
+       mfspr   r7, SPRN_DAWRX
+       std     r5, STACK_SLOT_CIABR(r1)
+       std     r6, STACK_SLOT_DAWR(r1)
+       std     r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 BEGIN_FTR_SECTION
        /* Set partition DABR */
@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 
        /* Check if HDEC expires soon */
        mfspr   r3, SPRN_HDEC
-       cmpwi   r3, 512         /* 1 microsecond */
+       EXTEND_HDEC(r3)
+       cmpdi   r3, 512         /* 1 microsecond */
        blt     hdec_soon
 
 #ifdef CONFIG_KVM_XICS
@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
         * set by the guest could disrupt the host.
         */
        li      r0, 0
-       mtspr   SPRN_IAMR, r0
-       mtspr   SPRN_CIABR, r0
-       mtspr   SPRN_DAWRX, r0
+       mtspr   SPRN_PSPB, r0
        mtspr   SPRN_WORT, r0
 BEGIN_FTR_SECTION
+       mtspr   SPRN_IAMR, r0
        mtspr   SPRN_TCSCR, r0
        /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
        li      r0, 1
@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
        std     r6,VCPU_UAMOR(r9)
        li      r6,0
        mtspr   SPRN_AMR,r6
+       mtspr   SPRN_UAMOR, r6
 
        /* Switch DSCR back to host value */
        mfspr   r8, SPRN_DSCR
@@ -1669,13 +1695,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        ptesync
 
        /* Restore host values of some registers */
+BEGIN_FTR_SECTION
+       ld      r5, STACK_SLOT_CIABR(r1)
+       ld      r6, STACK_SLOT_DAWR(r1)
+       ld      r7, STACK_SLOT_DAWRX(r1)
+       mtspr   SPRN_CIABR, r5
+       mtspr   SPRN_DAWR, r6
+       mtspr   SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 BEGIN_FTR_SECTION
        ld      r5, STACK_SLOT_TID(r1)
        ld      r6, STACK_SLOT_PSSCR(r1)
        ld      r7, STACK_SLOT_PID(r1)
+       ld      r8, STACK_SLOT_IAMR(r1)
        mtspr   SPRN_TIDR, r5
        mtspr   SPRN_PSSCR, r6
        mtspr   SPRN_PID, r7
+       mtspr   SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
        PPC_INVALIDATE_ERAT
@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
        li      r0, KVM_GUEST_MODE_NONE
        stb     r0, HSTATE_IN_GUEST(r13)
 
-       ld      r0, 112+PPC_LR_STKOFF(r1)
-       addi    r1, r1, 112
+       ld      r0, SFS+PPC_LR_STKOFF(r1)
+       addi    r1, r1, SFS
        mtlr    r0
        blr
 
@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
        mfspr   r3, SPRN_DEC
        mfspr   r4, SPRN_HDEC
        mftb    r5
-       cmpw    r3, r4
+       extsw   r3, r3
+       EXTEND_HDEC(r4)
+       cmpd    r3, r4
        ble     67f
        mtspr   SPRN_DEC, r4
 67:
        /* save expiry time of guest decrementer */
-       extsw   r3, r3
        add     r3, r3, r5
        ld      r4, HSTATE_KVM_VCPU(r13)
        ld      r5, HSTATE_KVM_VCORE(r13)
index cbd82fde57702e2a210608dc2e1800ae574465e0..09ceea6175ba9dc1d99b8b56eadae1367138b166 100644 (file)
@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
                        struct pt_regs *regs_user_copy)
 {
        regs_user->regs = task_pt_regs(current);
-       regs_user->abi  = perf_reg_abi(current);
+       regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+                        PERF_SAMPLE_REGS_ABI_NONE;
 }
index e6f444b462079c3c4f4bea059337b92e700488f5..b5d960d6db3d0b18d33273b31ac67e03caebd02b 100644 (file)
@@ -449,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
        return mmio_atsd_reg;
 }
 
-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
 {
        unsigned long launch;
 
@@ -465,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
        /* PID */
        launch |= pid << PPC_BITLSHIFT(38);
 
+       /* No flush */
+       launch |= !flush << PPC_BITLSHIFT(39);
+
        /* Invalidating the entire process doesn't use a va */
        return mmio_launch_invalidate(npu, launch, 0);
 }
 
 static int mmio_invalidate_va(struct npu *npu, unsigned long va,
-                       unsigned long pid)
+                       unsigned long pid, bool flush)
 {
        unsigned long launch;
 
@@ -486,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
        /* PID */
        launch |= pid << PPC_BITLSHIFT(38);
 
+       /* No flush */
+       launch |= !flush << PPC_BITLSHIFT(39);
+
        return mmio_launch_invalidate(npu, launch, va);
 }
 
 #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
 
+struct mmio_atsd_reg {
+       struct npu *npu;
+       int reg;
+};
+
+static void mmio_invalidate_wait(
+       struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
+{
+       struct npu *npu;
+       int i, reg;
+
+       /* Wait for all invalidations to complete */
+       for (i = 0; i <= max_npu2_index; i++) {
+               if (mmio_atsd_reg[i].reg < 0)
+                       continue;
+
+               /* Wait for completion */
+               npu = mmio_atsd_reg[i].npu;
+               reg = mmio_atsd_reg[i].reg;
+               while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+                       cpu_relax();
+
+               put_mmio_atsd_reg(npu, reg);
+
+               /*
+                * The GPU requires two flush ATSDs to ensure all entries have
+                * been flushed. We use PID 0 as it will never be used for a
+                * process on the GPU.
+                */
+               if (flush)
+                       mmio_invalidate_pid(npu, 0, true);
+       }
+}
+
 /*
  * Invalidate either a single address or an entire PID depending on
  * the value of va.
  */
 static void mmio_invalidate(struct npu_context *npu_context, int va,
-                       unsigned long address)
+                       unsigned long address, bool flush)
 {
-       int i, j, reg;
+       int i, j;
        struct npu *npu;
        struct pnv_phb *nphb;
        struct pci_dev *npdev;
-       struct {
-               struct npu *npu;
-               int reg;
-       } mmio_atsd_reg[NV_MAX_NPUS];
+       struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
        unsigned long pid = npu_context->mm->context.id;
 
        /*
@@ -525,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 
                        if (va)
                                mmio_atsd_reg[i].reg =
-                                       mmio_invalidate_va(npu, address, pid);
+                                       mmio_invalidate_va(npu, address, pid,
+                                                       flush);
                        else
                                mmio_atsd_reg[i].reg =
-                                       mmio_invalidate_pid(npu, pid);
+                                       mmio_invalidate_pid(npu, pid, flush);
 
                        /*
                         * The NPU hardware forwards the shootdown to all GPUs
@@ -544,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
         */
        flush_tlb_mm(npu_context->mm);
 
-       /* Wait for all invalidations to complete */
-       for (i = 0; i <= max_npu2_index; i++) {
-               if (mmio_atsd_reg[i].reg < 0)
-                       continue;
-
-               /* Wait for completion */
-               npu = mmio_atsd_reg[i].npu;
-               reg = mmio_atsd_reg[i].reg;
-               while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
-                       cpu_relax();
-               put_mmio_atsd_reg(npu, reg);
-       }
+       mmio_invalidate_wait(mmio_atsd_reg, flush);
+       if (flush)
+               /* Wait for the flush to complete */
+               mmio_invalidate_wait(mmio_atsd_reg, false);
 }
 
 static void pnv_npu2_mn_release(struct mmu_notifier *mn,
@@ -571,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
         * There should be no more translation requests for this PID, but we
         * need to ensure any entries for it are removed from the TLB.
         */
-       mmio_invalidate(npu_context, 0, 0);
+       mmio_invalidate(npu_context, 0, 0, true);
 }
 
 static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -581,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
 {
        struct npu_context *npu_context = mn_to_npu_context(mn);
 
-       mmio_invalidate(npu_context, 1, address);
+       mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -590,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
 {
        struct npu_context *npu_context = mn_to_npu_context(mn);
 
-       mmio_invalidate(npu_context, 1, address);
+       mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -600,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
        struct npu_context *npu_context = mn_to_npu_context(mn);
        unsigned long address;
 
-       for (address = start; address <= end; address += PAGE_SIZE)
-               mmio_invalidate(npu_context, 1, address);
+       for (address = start; address < end; address += PAGE_SIZE)
+               mmio_invalidate(npu_context, 1, address, false);
+
+       /* Do the flush only on the final addess == end */
+       mmio_invalidate(npu_context, 1, address, true);
 }
 
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -651,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
                /* No nvlink associated with this GPU device */
                return ERR_PTR(-ENODEV);
 
-       if (!mm) {
-               /* kernel thread contexts are not supported */
+       if (!mm || mm->context.id == 0) {
+               /*
+                * Kernel thread contexts are not supported and context id 0 is
+                * reserved on the GPU.
+                */
                return ERR_PTR(-EINVAL);
        }
 
index e784bed6ed7ffb24cabb02eb92a1d90494e3b7e1..2b498e58b91424b30d1f832aaf9eb1bda2fdf60a 100644 (file)
@@ -109,7 +109,7 @@ struct sysinfo_2_2_2 {
        unsigned short cpus_shared;
        char reserved_4[3];
        unsigned char vsne;
-       uuid_be uuid;
+       uuid_t uuid;
        char reserved_5[160];
        char ext_name[256];
 };
@@ -134,7 +134,7 @@ struct sysinfo_3_2_2 {
                char reserved_1[3];
                unsigned char evmne;
                unsigned int reserved_2;
-               uuid_be uuid;
+               uuid_t uuid;
        } vm[8];
        char reserved_3[1504];
        char ext_names[8][256];
index eefcb54872a59326e9178978fcff4b0814a97432..fb869b10382567d3be66c1c73e7beaa06876f942 100644 (file)
@@ -242,7 +242,7 @@ static void print_ext_name(struct seq_file *m, int lvl,
 
 static void print_uuid(struct seq_file *m, int i, struct sysinfo_3_2_2 *info)
 {
-       if (!memcmp(&info->vm[i].uuid, &NULL_UUID_BE, sizeof(uuid_be)))
+       if (uuid_is_null(&info->vm[i].uuid))
                return;
        seq_printf(m, "VM%02d UUID:            %pUb\n", i, &info->vm[i].uuid);
 }
index 9da243d94cc3286c5e1dabcfae5e563f991326a0..3b297fa3aa67c59be7fdb2fd2953f431adfbc1d4 100644 (file)
@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
        ptr = asce.origin * 4096;
        if (asce.r) {
                *fake = 1;
+               ptr = 0;
                asce.dt = ASCE_TYPE_REGION1;
        }
        switch (asce.dt) {
        case ASCE_TYPE_REGION1:
-               if (vaddr.rfx01 > asce.tl && !asce.r)
+               if (vaddr.rfx01 > asce.tl && !*fake)
                        return PGM_REGION_FIRST_TRANS;
                break;
        case ASCE_TYPE_REGION2:
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
                union region1_table_entry rfte;
 
                if (*fake) {
-                       /* offset in 16EB guest memory block */
-                       ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+                       ptr += (unsigned long) vaddr.rfx << 53;
                        rfte.val = ptr;
                        goto shadow_r2t;
                }
@@ -1036,8 +1036,7 @@ shadow_r2t:
                union region2_table_entry rste;
 
                if (*fake) {
-                       /* offset in 8PB guest memory block */
-                       ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+                       ptr += (unsigned long) vaddr.rsx << 42;
                        rste.val = ptr;
                        goto shadow_r3t;
                }
@@ -1064,8 +1063,7 @@ shadow_r3t:
                union region3_table_entry rtte;
 
                if (*fake) {
-                       /* offset in 4TB guest memory block */
-                       ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+                       ptr += (unsigned long) vaddr.rtx << 31;
                        rtte.val = ptr;
                        goto shadow_sgt;
                }
@@ -1101,8 +1099,7 @@ shadow_sgt:
                union segment_table_entry ste;
 
                if (*fake) {
-                       /* offset in 2G guest memory block */
-                       ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+                       ptr += (unsigned long) vaddr.sx << 20;
                        ste.val = ptr;
                        goto shadow_pgt;
                }
index a6d91d4e37a1f1dadae588a1c084e31e65d08f5d..110ce8238466f7e404d8312dd8d9297fa09355c9 100644 (file)
@@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids
  [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_INST_RETIRED.ALL_LOADS */
-               [ C(RESULT_MISS)   ] = 0x608,   /* DTLB_LOAD_MISSES.WALK_COMPLETED */
+               [ C(RESULT_MISS)   ] = 0xe08,   /* DTLB_LOAD_MISSES.WALK_COMPLETED */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_INST_RETIRED.ALL_STORES */
-               [ C(RESULT_MISS)   ] = 0x649,   /* DTLB_STORE_MISSES.WALK_COMPLETED */
+               [ C(RESULT_MISS)   ] = 0xe49,   /* DTLB_STORE_MISSES.WALK_COMPLETED */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
index 05596261577937d65afcad75574bab676a7effe9..722d0e56886342a3a9f65d7f419d0e240a9cc6ab 100644 (file)
@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
 
        bool perm_ok; /* do not check permissions if true */
        bool ud;        /* inject an #UD if host doesn't support insn */
+       bool tf;        /* TF value before instruction (after for syscall/sysret) */
 
        bool have_exception;
        struct x86_exception exception;
index fba1007139243b21081b6bfa114bed588e6f4e8e..d5acc27ed1cc79ab7bc621ae0d85ad4076cbc4cc 100644 (file)
@@ -2,8 +2,7 @@
 #define _ASM_X86_MSHYPER_H
 
 #include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
+#include <linux/atomic.h>
 #include <asm/hyperv.h>
 
 /*
index 673f9ac50f6d12612612e8efcce4eab0ef98bcbb..d8638e2419cc922208357b133e432dff09fb0916 100644 (file)
 #define HWP_MIN_PERF(x)                (x & 0xff)
 #define HWP_MAX_PERF(x)                ((x & 0xff) << 8)
 #define HWP_DESIRED_PERF(x)            ((x & 0xff) << 16)
-#define HWP_ENERGY_PERF_PREFERENCE(x)  ((x & 0xff) << 24)
-#define HWP_ACTIVITY_WINDOW(x)         ((x & 0xff3) << 32)
-#define HWP_PACKAGE_CONTROL(x)         ((x & 0x1) << 42)
+#define HWP_ENERGY_PERF_PREFERENCE(x)  (((unsigned long long) x & 0xff) << 24)
+#define HWP_EPP_PERFORMANCE            0x00
+#define HWP_EPP_BALANCE_PERFORMANCE    0x80
+#define HWP_EPP_BALANCE_POWERSAVE      0xC0
+#define HWP_EPP_POWERSAVE              0xFF
+#define HWP_ACTIVITY_WINDOW(x)         ((unsigned long long)(x & 0xff3) << 32)
+#define HWP_PACKAGE_CONTROL(x)         ((unsigned long long)(x & 0x1) << 42)
 
 /* IA32_HWP_STATUS */
 #define HWP_GUARANTEED_CHANGE(x)       (x & 0x1)
 #define MSR_MISC_PWR_MGMT              0x000001aa
 
 #define MSR_IA32_ENERGY_PERF_BIAS      0x000001b0
-#define ENERGY_PERF_BIAS_PERFORMANCE   0
-#define ENERGY_PERF_BIAS_NORMAL                6
-#define ENERGY_PERF_BIAS_POWERSAVE     15
+#define ENERGY_PERF_BIAS_PERFORMANCE           0
+#define ENERGY_PERF_BIAS_BALANCE_PERFORMANCE   4
+#define ENERGY_PERF_BIAS_NORMAL                        6
+#define ENERGY_PERF_BIAS_BALANCE_POWERSAVE     8
+#define ENERGY_PERF_BIAS_POWERSAVE             15
 
 #define MSR_IA32_PACKAGE_THERM_STATUS          0x000001b1
 
index 8233a630280f52052ffb9a5f7c89f273cedf0425..dde437f5d14ff828dccff19275033ff2330acc60 100644 (file)
@@ -167,7 +167,8 @@ static int __init ffh_cstate_init(void)
 {
        struct cpuinfo_x86 *c = &boot_cpu_data;
 
-       if (c->x86_vendor != X86_VENDOR_INTEL)
+       if (c->x86_vendor != X86_VENDOR_INTEL &&
+           c->x86_vendor != X86_VENDOR_AMD)
                return -1;
 
        cpu_cstate_entry = alloc_percpu(struct cstate_entry);
index 0816ab2e8adcae2b45f83c95c51e8b95a245b07e..80890dee66cebf370a3815e28f7bd7c34025b0d4 100644 (file)
@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
                ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
        }
 
+       ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
        return X86EMUL_CONTINUE;
 }
 
index 87d3cb901935f2b251857f54ed53ca73567f10ef..0e846f0cb83bb214811d0a12d2f700cc96a455f9 100644 (file)
@@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
        ctxt->eflags = kvm_get_rflags(vcpu);
+       ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+
        ctxt->eip = kvm_rip_read(vcpu);
        ctxt->mode = (!is_protmode(vcpu))               ? X86EMUL_MODE_REAL :
                     (ctxt->eflags & X86_EFLAGS_VM)     ? X86EMUL_MODE_VM86 :
@@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
        return dr6;
 }
 
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
+static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
 {
        struct kvm_run *kvm_run = vcpu->run;
 
-       /*
-        * rflags is the old, "raw" value of the flags.  The new value has
-        * not been saved yet.
-        *
-        * This is correct even for TF set by the guest, because "the
-        * processor will not generate this exception after the instruction
-        * that sets the TF flag".
-        */
-       if (unlikely(rflags & X86_EFLAGS_TF)) {
-               if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-                       kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
-                                                 DR6_RTM;
-                       kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
-                       kvm_run->debug.arch.exception = DB_VECTOR;
-                       kvm_run->exit_reason = KVM_EXIT_DEBUG;
-                       *r = EMULATE_USER_EXIT;
-               } else {
-                       /*
-                        * "Certain debug exceptions may clear bit 0-3.  The
-                        * remaining contents of the DR6 register are never
-                        * cleared by the processor".
-                        */
-                       vcpu->arch.dr6 &= ~15;
-                       vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
-                       kvm_queue_exception(vcpu, DB_VECTOR);
-               }
+       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+               kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+               kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+               kvm_run->debug.arch.exception = DB_VECTOR;
+               kvm_run->exit_reason = KVM_EXIT_DEBUG;
+               *r = EMULATE_USER_EXIT;
+       } else {
+               /*
+                * "Certain debug exceptions may clear bit 0-3.  The
+                * remaining contents of the DR6 register are never
+                * cleared by the processor".
+                */
+               vcpu->arch.dr6 &= ~15;
+               vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+               kvm_queue_exception(vcpu, DB_VECTOR);
        }
 }
 
@@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
        int r = EMULATE_DONE;
 
        kvm_x86_ops->skip_emulated_instruction(vcpu);
-       kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+
+       /*
+        * rflags is the old, "raw" value of the flags.  The new value has
+        * not been saved yet.
+        *
+        * This is correct even for TF set by the guest, because "the
+        * processor will not generate this exception after the instruction
+        * that sets the TF flag".
+        */
+       if (unlikely(rflags & X86_EFLAGS_TF))
+               kvm_vcpu_do_singlestep(vcpu, &r);
        return r == EMULATE_DONE;
 }
 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
@@ -5726,8 +5727,9 @@ restart:
                toggle_interruptibility(vcpu, ctxt->interruptibility);
                vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
                kvm_rip_write(vcpu, ctxt->eip);
-               if (r == EMULATE_DONE)
-                       kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+               if (r == EMULATE_DONE &&
+                   (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+                       kvm_vcpu_do_singlestep(vcpu, &r);
                if (!ctxt->have_exception ||
                    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
                        __kvm_set_rflags(vcpu, ctxt->eflags);
index 1f5b692526ae1a7199ee9bbaef305c4b0a42e696..0ded5e846335667406d58ce08e8439360baeb312 100644 (file)
@@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
                __blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 
+/*
+ * Mark a hardware queue as needing a restart. For shared queues, maintain
+ * a count of how many hardware queues are marked for restart.
+ */
+static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+               return;
+
+       if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+               struct request_queue *q = hctx->queue;
+
+               if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                       atomic_inc(&q->shared_hctx_restart);
+       } else
+               set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+}
+
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+       if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+               return false;
+
+       if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+               struct request_queue *q = hctx->queue;
+
+               if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                       atomic_dec(&q->shared_hctx_restart);
+       } else
+               clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+
+       if (blk_mq_hctx_has_pending(hctx)) {
+               blk_mq_run_hw_queue(hctx, true);
+               return true;
+       }
+
+       return false;
+}
+
 struct request *blk_mq_sched_get_request(struct request_queue *q,
                                         struct bio *bio,
                                         unsigned int op,
@@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
        return true;
 }
 
-static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
-               clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-               if (blk_mq_hctx_has_pending(hctx)) {
-                       blk_mq_run_hw_queue(hctx, true);
-                       return true;
-               }
-       }
-       return false;
-}
-
 /**
  * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
  * @pos:    loop cursor.
@@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
        unsigned int i, j;
 
        if (set->flags & BLK_MQ_F_TAG_SHARED) {
+               /*
+                * If this is 0, then we know that no hardware queues
+                * have RESTART marked. We're done.
+                */
+               if (!atomic_read(&queue->shared_hctx_restart))
+                       return;
+
                rcu_read_lock();
                list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
                                           tag_set_list) {
index edafb5383b7bbdedfd5365ed38f9a5c373ec96ab..5007edece51aced038d3db8f0adbc722c49e3d38 100644 (file)
@@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
        return false;
 }
 
-/*
- * Mark a hardware queue as needing a restart.
- */
-static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-       if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-               set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-}
-
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 {
        return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
index bb66c96850b18cb419b0e44aab1894169352f9af..958cedaff8b829ceb4c724dbf1c6f6d30d883aeb 100644 (file)
@@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q,
        }
 }
 
+/*
+ * Caller needs to ensure that we're either frozen/quiesced, or that
+ * the queue isn't live yet.
+ */
 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
        struct blk_mq_hw_ctx *hctx;
        int i;
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               if (shared)
+               if (shared) {
+                       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                               atomic_inc(&q->shared_hctx_restart);
                        hctx->flags |= BLK_MQ_F_TAG_SHARED;
-               else
+               } else {
+                       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                               atomic_dec(&q->shared_hctx_restart);
                        hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+               }
        }
 }
 
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
+                                       bool shared)
 {
        struct request_queue *q;
 
index edcea70674c9de501b6d26b9082ddd780ecc5bea..2a365c756648ca66f55274a49cf1aaad2d1c96e8 100644 (file)
@@ -115,7 +115,7 @@ static bool ldm_parse_privhead(const u8 *data, struct privhead *ph)
                ldm_error("PRIVHEAD disk size doesn't match real disk size");
                return false;
        }
-       if (uuid_be_to_bin(data + 0x0030, (uuid_be *)ph->disk_id)) {
+       if (uuid_parse(data + 0x0030, &ph->disk_id)) {
                ldm_error("PRIVHEAD contains an invalid GUID.");
                return false;
        }
@@ -234,7 +234,7 @@ static bool ldm_compare_privheads (const struct privhead *ph1,
                (ph1->logical_disk_size  == ph2->logical_disk_size)     &&
                (ph1->config_start       == ph2->config_start)          &&
                (ph1->config_size        == ph2->config_size)           &&
-               !memcmp (ph1->disk_id, ph2->disk_id, GUID_SIZE));
+               uuid_equal(&ph1->disk_id, &ph2->disk_id));
 }
 
 /**
@@ -557,7 +557,7 @@ static struct vblk * ldm_get_disk_objid (const struct ldmdb *ldb)
 
        list_for_each (item, &ldb->v_disk) {
                struct vblk *v = list_entry (item, struct vblk, list);
-               if (!memcmp (v->vblk.disk.disk_id, ldb->ph.disk_id, GUID_SIZE))
+               if (uuid_equal(&v->vblk.disk.disk_id, &ldb->ph.disk_id))
                        return v;
        }
 
@@ -892,7 +892,7 @@ static bool ldm_parse_dsk3 (const u8 *buffer, int buflen, struct vblk *vb)
        disk = &vb->vblk.disk;
        ldm_get_vstr (buffer + 0x18 + r_diskid, disk->alt_name,
                sizeof (disk->alt_name));
-       if (uuid_be_to_bin(buffer + 0x19 + r_name, (uuid_be *)disk->disk_id))
+       if (uuid_parse(buffer + 0x19 + r_name, &disk->disk_id))
                return false;
 
        return true;
@@ -927,7 +927,7 @@ static bool ldm_parse_dsk4 (const u8 *buffer, int buflen, struct vblk *vb)
                return false;
 
        disk = &vb->vblk.disk;
-       memcpy (disk->disk_id, buffer + 0x18 + r_name, GUID_SIZE);
+       uuid_copy(&disk->disk_id, (uuid_t *)(buffer + 0x18 + r_name));
        return true;
 }
 
index 374242c0971a671addd9d111ac64cade9cb22e50..f4c6055df9563892a991ac5f8c7c2fff12ce3e4b 100644 (file)
@@ -112,8 +112,6 @@ struct frag {                               /* VBLK Fragment handling */
 
 /* In memory LDM database structures. */
 
-#define GUID_SIZE              16
-
 struct privhead {                      /* Offsets and sizes are in sectors. */
        u16     ver_major;
        u16     ver_minor;
@@ -121,7 +119,7 @@ struct privhead {                   /* Offsets and sizes are in sectors. */
        u64     logical_disk_size;
        u64     config_start;
        u64     config_size;
-       u8      disk_id[GUID_SIZE];
+       uuid_t  disk_id;
 };
 
 struct tocblock {                      /* We have exactly two bitmaps. */
@@ -154,7 +152,7 @@ struct vblk_dgrp {                  /* VBLK Disk Group */
 };
 
 struct vblk_disk {                     /* VBLK Disk */
-       u8      disk_id[GUID_SIZE];
+       uuid_t  disk_id;
        u8      alt_name[128];
 };
 
index 502ea4dc208060d4daa2d1c296bf1b3545b4278a..560fdae8cc59015d78b13a73b377343e96495257 100644 (file)
@@ -141,9 +141,9 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
        int     cpu = mce->extcpu;
        struct acpi_hest_generic_status *estatus, *tmp;
        struct acpi_hest_generic_data *gdata;
-       const uuid_le *fru_id = &NULL_UUID_LE;
+       const guid_t *fru_id = &guid_null;
        char *fru_text = "";
-       uuid_le *sec_type;
+       guid_t *sec_type;
        static u32 err_seq;
 
        estatus = extlog_elog_entry_check(cpu, bank);
@@ -165,11 +165,11 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
        err_seq++;
        gdata = (struct acpi_hest_generic_data *)(tmp + 1);
        if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
-               fru_id = (uuid_le *)gdata->fru_id;
+               fru_id = (guid_t *)gdata->fru_id;
        if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
                fru_text = gdata->fru_text;
-       sec_type = (uuid_le *)gdata->section_type;
-       if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
+       sec_type = (guid_t *)gdata->section_type;
+       if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
                struct cper_sec_mem_err *mem = (void *)(gdata + 1);
                if (gdata->error_data_length >= sizeof(*mem))
                        trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
@@ -182,17 +182,17 @@ out:
 
 static bool __init extlog_get_l1addr(void)
 {
-       u8 uuid[16];
+       guid_t guid;
        acpi_handle handle;
        union acpi_object *obj;
 
-       acpi_str_to_uuid(extlog_dsm_uuid, uuid);
-
+       if (guid_parse(extlog_dsm_uuid, &guid))
+               return false;
        if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
                return false;
-       if (!acpi_check_dsm(handle, uuid, EXTLOG_DSM_REV, 1 << EXTLOG_FN_ADDR))
+       if (!acpi_check_dsm(handle, &guid, EXTLOG_DSM_REV, 1 << EXTLOG_FN_ADDR))
                return false;
-       obj = acpi_evaluate_dsm_typed(handle, uuid, EXTLOG_DSM_REV,
+       obj = acpi_evaluate_dsm_typed(handle, &guid, EXTLOG_DSM_REV,
                                      EXTLOG_FN_ADDR, NULL, ACPI_TYPE_INTEGER);
        if (!obj) {
                return false;
index d0855c09f32f36491026e750e64d0a824129e6dc..980515e029fae538de8889782226f7b284fbd727 100644 (file)
@@ -431,12 +431,13 @@ static void ghes_do_proc(struct ghes *ghes,
 {
        int sev, sec_sev;
        struct acpi_hest_generic_data *gdata;
+       guid_t *sec_type;
 
        sev = ghes_severity(estatus->error_severity);
        apei_estatus_for_each_section(estatus, gdata) {
+               sec_type = (guid_t *)gdata->section_type;
                sec_sev = ghes_severity(gdata->error_severity);
-               if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
-                                CPER_SEC_PLATFORM_MEM)) {
+               if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
                        struct cper_sec_mem_err *mem_err;
                        mem_err = (struct cper_sec_mem_err *)(gdata+1);
                        ghes_edac_report_mem_error(ghes, sev, mem_err);
@@ -445,8 +446,7 @@ static void ghes_do_proc(struct ghes *ghes,
                        ghes_handle_memory_failure(gdata, sev);
                }
 #ifdef CONFIG_ACPI_APEI_PCIEAER
-               else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
-                                     CPER_SEC_PCIE)) {
+               else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
                        struct cper_sec_pcie *pcie_err;
                        pcie_err = (struct cper_sec_pcie *)(gdata+1);
                        if (sev == GHES_SEV_RECOVERABLE &&
index 784bda663d162d36d1e4bdc47ce8a6b5b595be8a..5a6fbe0fcaf2b82db279c40bd031abac2b4b6125 100644 (file)
@@ -196,42 +196,19 @@ static void acpi_print_osc_error(acpi_handle handle,
        pr_debug("\n");
 }
 
-acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
-{
-       int i;
-       static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
-               24, 26, 28, 30, 32, 34};
-
-       if (strlen(str) != 36)
-               return AE_BAD_PARAMETER;
-       for (i = 0; i < 36; i++) {
-               if (i == 8 || i == 13 || i == 18 || i == 23) {
-                       if (str[i] != '-')
-                               return AE_BAD_PARAMETER;
-               } else if (!isxdigit(str[i]))
-                       return AE_BAD_PARAMETER;
-       }
-       for (i = 0; i < 16; i++) {
-               uuid[i] = hex_to_bin(str[opc_map_to_uuid[i]]) << 4;
-               uuid[i] |= hex_to_bin(str[opc_map_to_uuid[i] + 1]);
-       }
-       return AE_OK;
-}
-EXPORT_SYMBOL_GPL(acpi_str_to_uuid);
-
 acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
 {
        acpi_status status;
        struct acpi_object_list input;
        union acpi_object in_params[4];
        union acpi_object *out_obj;
-       u8 uuid[16];
+       guid_t guid;
        u32 errors;
        struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
 
        if (!context)
                return AE_ERROR;
-       if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid)))
+       if (guid_parse(context->uuid_str, &guid))
                return AE_ERROR;
        context->ret.length = ACPI_ALLOCATE_BUFFER;
        context->ret.pointer = NULL;
@@ -241,7 +218,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
        input.pointer = in_params;
        in_params[0].type               = ACPI_TYPE_BUFFER;
        in_params[0].buffer.length      = 16;
-       in_params[0].buffer.pointer     = uuid;
+       in_params[0].buffer.pointer     = (u8 *)&guid;
        in_params[1].type               = ACPI_TYPE_INTEGER;
        in_params[1].integer.value      = context->rev;
        in_params[2].type               = ACPI_TYPE_INTEGER;
index 656acb5d71660a6dfffaecc877bb46d76af08fb3..097eff0b963d5b6d1685809a16980efeaf406975 100644 (file)
@@ -74,11 +74,11 @@ struct nfit_table_prev {
        struct list_head flushes;
 };
 
-static u8 nfit_uuid[NFIT_UUID_MAX][16];
+static guid_t nfit_uuid[NFIT_UUID_MAX];
 
-const u8 *to_nfit_uuid(enum nfit_uuids id)
+const guid_t *to_nfit_uuid(enum nfit_uuids id)
 {
-       return nfit_uuid[id];
+       return &nfit_uuid[id];
 }
 EXPORT_SYMBOL(to_nfit_uuid);
 
@@ -222,7 +222,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
        u32 offset, fw_status = 0;
        acpi_handle handle;
        unsigned int func;
-       const u8 *uuid;
+       const guid_t *guid;
        int rc, i;
 
        func = cmd;
@@ -245,7 +245,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                cmd_mask = nvdimm_cmd_mask(nvdimm);
                dsm_mask = nfit_mem->dsm_mask;
                desc = nd_cmd_dimm_desc(cmd);
-               uuid = to_nfit_uuid(nfit_mem->family);
+               guid = to_nfit_uuid(nfit_mem->family);
                handle = adev->handle;
        } else {
                struct acpi_device *adev = to_acpi_dev(acpi_desc);
@@ -254,7 +254,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                cmd_mask = nd_desc->cmd_mask;
                dsm_mask = cmd_mask;
                desc = nd_cmd_bus_desc(cmd);
-               uuid = to_nfit_uuid(NFIT_DEV_BUS);
+               guid = to_nfit_uuid(NFIT_DEV_BUS);
                handle = adev->handle;
                dimm_name = "bus";
        }
@@ -289,7 +289,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                        in_buf.buffer.pointer,
                        min_t(u32, 256, in_buf.buffer.length), true);
 
-       out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
+       out_obj = acpi_evaluate_dsm(handle, guid, 1, func, &in_obj);
        if (!out_obj) {
                dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
                                cmd_name);
@@ -409,7 +409,7 @@ int nfit_spa_type(struct acpi_nfit_system_address *spa)
        int i;
 
        for (i = 0; i < NFIT_UUID_MAX; i++)
-               if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
+               if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
                        return i;
        return -1;
 }
@@ -1415,7 +1415,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
        struct acpi_device *adev, *adev_dimm;
        struct device *dev = acpi_desc->dev;
        unsigned long dsm_mask;
-       const u8 *uuid;
+       const guid_t *guid;
        int i;
        int family = -1;
 
@@ -1444,7 +1444,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
        /*
         * Until standardization materializes we need to consider 4
         * different command sets.  Note, that checking for function0 (bit0)
-        * tells us if any commands are reachable through this uuid.
+        * tells us if any commands are reachable through this GUID.
         */
        for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
                if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
@@ -1474,9 +1474,9 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
                return 0;
        }
 
-       uuid = to_nfit_uuid(nfit_mem->family);
+       guid = to_nfit_uuid(nfit_mem->family);
        for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
-               if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
+               if (acpi_check_dsm(adev_dimm->handle, guid, 1, 1ULL << i))
                        set_bit(i, &nfit_mem->dsm_mask);
 
        return 0;
@@ -1611,7 +1611,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
 {
        struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
-       const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
+       const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
        struct acpi_device *adev;
        int i;
 
@@ -1621,7 +1621,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
                return;
 
        for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
-               if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
+               if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
                        set_bit(i, &nd_desc->cmd_mask);
 }
 
@@ -3051,19 +3051,19 @@ static __init int nfit_init(void)
        BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
 
-       acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
-       acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
-       acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
-       acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
-       acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
-       acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
-       acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
-       acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
-       acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
-       acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
-       acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
-       acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
-       acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
+       guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
+       guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
+       guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
+       guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
+       guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
+       guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
+       guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
+       guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
+       guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
+       guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
+       guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
+       guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
+       guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
 
        nfit_wq = create_singlethread_workqueue("nfit");
        if (!nfit_wq)
index 58fb7d68e04a3219eb77dafa12f06fe49292ec9f..29bdd959517f806aedab452586654ab7b996a089 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/libnvdimm.h>
 #include <linux/ndctl.h>
 #include <linux/types.h>
-#include <linux/uuid.h>
 #include <linux/acpi.h>
 #include <acpi/acuuid.h>
 
@@ -237,7 +236,7 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
        return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
 }
 
-const u8 *to_nfit_uuid(enum nfit_uuids id);
+const guid_t *to_nfit_uuid(enum nfit_uuids id);
 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
 void acpi_nfit_shutdown(void *data);
 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event);
index 3a10d7573477e7dea0139c5f885e9514a1886a7a..d53162997f32002828a6db353bcd1926f2a8dc97 100644 (file)
@@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
        adev->flags.coherent_dma = cca;
 }
 
+static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
+{
+       bool *is_spi_i2c_slave_p = data;
+
+       if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+               return 1;
+
+       /*
+        * devices that are connected to UART still need to be enumerated to
+        * platform bus
+        */
+       if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+               *is_spi_i2c_slave_p = true;
+
+        /* no need to do more checking */
+       return -1;
+}
+
+static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
+{
+       struct list_head resource_list;
+       bool is_spi_i2c_slave = false;
+
+       INIT_LIST_HEAD(&resource_list);
+       acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
+                              &is_spi_i2c_slave);
+       acpi_dev_free_resource_list(&resource_list);
+
+       return is_spi_i2c_slave;
+}
+
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
                             int type, unsigned long long sta)
 {
@@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
        acpi_bus_get_flags(device);
        device->flags.match_driver = false;
        device->flags.initialized = true;
+       device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
        acpi_device_clear_enumerated(device);
        device_initialize(&device->dev);
        dev_set_uevent_suppress(&device->dev, true);
@@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
        return AE_OK;
 }
 
-static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
-{
-       bool *is_spi_i2c_slave_p = data;
-
-       if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
-               return 1;
-
-       /*
-        * devices that are connected to UART still need to be enumerated to
-        * platform bus
-        */
-       if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
-               *is_spi_i2c_slave_p = true;
-
-        /* no need to do more checking */
-       return -1;
-}
-
 static void acpi_default_enumeration(struct acpi_device *device)
 {
-       struct list_head resource_list;
-       bool is_spi_i2c_slave = false;
-
        /*
         * Do not enumerate SPI/I2C slaves as they will be enumerated by their
         * respective parents.
         */
-       INIT_LIST_HEAD(&resource_list);
-       acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
-                              &is_spi_i2c_slave);
-       acpi_dev_free_resource_list(&resource_list);
-       if (!is_spi_i2c_slave) {
+       if (!device->flags.spi_i2c_slave) {
                acpi_create_platform_device(device, NULL);
                acpi_device_set_enumerated(device);
        } else {
@@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device)
                return;
 
        device->flags.match_driver = true;
-       if (ret > 0) {
+       if (ret > 0 && !device->flags.spi_i2c_slave) {
                acpi_device_set_enumerated(device);
                goto ok;
        }
@@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device)
        if (ret < 0)
                return;
 
-       if (device->pnp.type.platform_id)
-               acpi_default_enumeration(device);
-       else
+       if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
                acpi_device_set_enumerated(device);
+       else
+               acpi_default_enumeration(device);
 
  ok:
        list_for_each_entry(child, &device->children, node)
index 27d0dcfcf47d6895a0f05963152172f87da0e7c8..b9d956c916f5e65ca737b5c02c45d266c65e00d1 100644 (file)
@@ -613,19 +613,19 @@ acpi_status acpi_evaluate_lck(acpi_handle handle, int lock)
 /**
  * acpi_evaluate_dsm - evaluate device's _DSM method
  * @handle: ACPI device handle
- * @uuid: UUID of requested functions, should be 16 bytes
+ * @guid: GUID of requested functions, should be 16 bytes
  * @rev: revision number of requested function
  * @func: requested function number
  * @argv4: the function specific parameter
  *
- * Evaluate device's _DSM method with specified UUID, revision id and
+ * Evaluate device's _DSM method with specified GUID, revision id and
  * function number. Caller needs to free the returned object.
  *
  * Though ACPI defines the fourth parameter for _DSM should be a package,
  * some old BIOSes do expect a buffer or an integer etc.
  */
 union acpi_object *
-acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
+acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 func,
                  union acpi_object *argv4)
 {
        acpi_status ret;
@@ -638,7 +638,7 @@ acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
 
        params[0].type = ACPI_TYPE_BUFFER;
        params[0].buffer.length = 16;
-       params[0].buffer.pointer = (char *)uuid;
+       params[0].buffer.pointer = (u8 *)guid;
        params[1].type = ACPI_TYPE_INTEGER;
        params[1].integer.value = rev;
        params[2].type = ACPI_TYPE_INTEGER;
@@ -666,7 +666,7 @@ EXPORT_SYMBOL(acpi_evaluate_dsm);
 /**
  * acpi_check_dsm - check if _DSM method supports requested functions.
  * @handle: ACPI device handle
- * @uuid: UUID of requested functions, should be 16 bytes at least
+ * @guid: GUID of requested functions, should be 16 bytes at least
  * @rev: revision number of requested functions
  * @funcs: bitmap of requested functions
  *
@@ -674,7 +674,7 @@ EXPORT_SYMBOL(acpi_evaluate_dsm);
  * functions. Currently only support 64 functions at maximum, should be
  * enough for now.
  */
-bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
+bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs)
 {
        int i;
        u64 mask = 0;
@@ -683,7 +683,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
        if (funcs == 0)
                return false;
 
-       obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL);
+       obj = acpi_evaluate_dsm(handle, guid, rev, 0, NULL);
        if (!obj)
                return false;
 
@@ -697,7 +697,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
 
        /*
         * Bit 0 indicates whether there's support for any functions other than
-        * function 0 for the specified UUID and revision.
+        * function 0 for the specified GUID and revision.
         */
        if ((mask & 0x1) && (mask & funcs) == funcs)
                return true;
index 726c32e35db9c542e6f050ff0a04e31e10fc2b7d..0e824091a12fac8757c2ade5d4e5dae6a1470cbd 100644 (file)
@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
        unsigned long timeout;
        int ret;
 
-       xen_blkif_get(blkif);
-
        set_freezable();
        while (!kthread_should_stop()) {
                if (try_to_freeze())
@@ -665,7 +663,6 @@ purge_gnt_list:
                print_stats(ring);
 
        ring->xenblkd = NULL;
-       xen_blkif_put(blkif);
 
        return 0;
 }
@@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 static void make_response(struct xen_blkif_ring *ring, u64 id,
                          unsigned short op, int st)
 {
-       struct blkif_response  resp;
+       struct blkif_response *resp;
        unsigned long     flags;
        union blkif_back_rings *blk_rings;
        int notify;
 
-       resp.id        = id;
-       resp.operation = op;
-       resp.status    = st;
-
        spin_lock_irqsave(&ring->blk_ring_lock, flags);
        blk_rings = &ring->blk_rings;
        /* Place on the response ring for the relevant domain. */
        switch (ring->blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
-               memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
-                      &resp, sizeof(resp));
+               resp = RING_GET_RESPONSE(&blk_rings->native,
+                                        blk_rings->native.rsp_prod_pvt);
                break;
        case BLKIF_PROTOCOL_X86_32:
-               memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
-                      &resp, sizeof(resp));
+               resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+                                        blk_rings->x86_32.rsp_prod_pvt);
                break;
        case BLKIF_PROTOCOL_X86_64:
-               memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
-                      &resp, sizeof(resp));
+               resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+                                        blk_rings->x86_64.rsp_prod_pvt);
                break;
        default:
                BUG();
        }
+
+       resp->id        = id;
+       resp->operation = op;
+       resp->status    = st;
+
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
index dea61f6ab8cbdbaffedceb4c64bda239b51a63a4..ecb35fe8ca8dbb54f36a85513a09064819acd67a 100644 (file)
@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
 struct blkif_common_request {
        char dummy;
 };
-struct blkif_common_response {
-       char dummy;
-};
+
+/* i386 protocol version */
 
 struct blkif_x86_32_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
        } u;
 } __attribute__((__packed__));
 
-/* i386 protocol version */
-#pragma pack(push, 4)
-struct blkif_x86_32_response {
-       uint64_t        id;              /* copied from request */
-       uint8_t         operation;       /* copied from request */
-       int16_t         status;          /* BLKIF_RSP_???       */
-};
-#pragma pack(pop)
 /* x86_64 protocol version */
 
 struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
        } u;
 } __attribute__((__packed__));
 
-struct blkif_x86_64_response {
-       uint64_t       __attribute__((__aligned__(8))) id;
-       uint8_t         operation;       /* copied from request */
-       int16_t         status;          /* BLKIF_RSP_???       */
-};
-
 DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
-                 struct blkif_common_response);
+                 struct blkif_response);
 DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
-                 struct blkif_x86_32_response);
+                 struct blkif_response __packed);
 DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
-                 struct blkif_x86_64_response);
+                 struct blkif_response);
 
 union blkif_back_rings {
        struct blkif_back_ring        native;
@@ -281,6 +266,7 @@ struct xen_blkif_ring {
 
        wait_queue_head_t       wq;
        atomic_t                inflight;
+       bool                    active;
        /* One thread per blkif ring. */
        struct task_struct      *xenblkd;
        unsigned int            waiting_reqs;
index 1f3dfaa54d871a36897408898c1e0e9f22100bb1..792da683e70dafafa6f69e224b8e57272d3e6be1 100644 (file)
@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
                init_waitqueue_head(&ring->shutdown_wq);
                ring->blkif = blkif;
                ring->st_print = jiffies;
-               xen_blkif_get(blkif);
+               ring->active = true;
        }
 
        return 0;
@@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
                struct xen_blkif_ring *ring = &blkif->rings[r];
                unsigned int i = 0;
 
+               if (!ring->active)
+                       continue;
+
                if (ring->xenblkd) {
                        kthread_stop(ring->xenblkd);
                        wake_up(&ring->shutdown_wq);
-                       ring->xenblkd = NULL;
                }
 
                /* The above kthread_stop() guarantees that at this point we
@@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
                BUG_ON(ring->free_pages_num != 0);
                BUG_ON(ring->persistent_gnt_c != 0);
                WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-               xen_blkif_put(blkif);
+               ring->active = false;
        }
        blkif->nr_ring_pages = 0;
        /*
@@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 
 static void xen_blkif_free(struct xen_blkif *blkif)
 {
-
-       xen_blkif_disconnect(blkif);
+       WARN_ON(xen_blkif_disconnect(blkif));
        xen_vbd_free(&blkif->vbd);
+       kfree(blkif->be->mode);
+       kfree(blkif->be);
 
        /* Make sure everything is drained before shutting down */
        kmem_cache_free(xen_blkif_cachep, blkif);
@@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
                xen_blkif_put(be->blkif);
        }
 
-       kfree(be->mode);
-       kfree(be);
        return 0;
 }
 
index e870f329db888c58e06bb854e7cf55d78a8bd313..01a260f67437488b425372c12d33142fce699f84 100644 (file)
@@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
                p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
                cp++; crng_init_cnt++; len--;
        }
+       spin_unlock_irqrestore(&primary_crng.lock, flags);
        if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
                invalidate_batched_entropy();
                crng_init = 1;
                wake_up_interruptible(&crng_init_wait);
                pr_notice("random: fast init done\n");
        }
-       spin_unlock_irqrestore(&primary_crng.lock, flags);
        return 1;
 }
 
@@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
        }
        memzero_explicit(&buf, sizeof(buf));
        crng->init_time = jiffies;
+       spin_unlock_irqrestore(&primary_crng.lock, flags);
        if (crng == &primary_crng && crng_init < 2) {
                invalidate_batched_entropy();
                crng_init = 2;
@@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
                wake_up_interruptible(&crng_init_wait);
                pr_notice("random: crng init done\n");
        }
-       spin_unlock_irqrestore(&primary_crng.lock, flags);
 }
 
 static inline void crng_wait_ready(void)
@@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
        u64 ret;
-       bool use_lock = crng_init < 2;
-       unsigned long flags;
+       bool use_lock = READ_ONCE(crng_init) < 2;
+       unsigned long flags = 0;
        struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
        u32 ret;
-       bool use_lock = crng_init < 2;
-       unsigned long flags;
+       bool use_lock = READ_ONCE(crng_init) < 2;
+       unsigned long flags = 0;
        struct batched_entropy *batch;
 
        if (arch_get_random_int(&ret))
index b917b9d5f71021fcc433e6ca4e1552974750f237..c378c7b15d497a1c153b45892e572410c1898b42 100644 (file)
 
 #define ACPI_SIG_TPM2 "TPM2"
 
-static const u8 CRB_ACPI_START_UUID[] = {
-       /* 0000 */ 0xAB, 0x6C, 0xBF, 0x6B, 0x63, 0x54, 0x14, 0x47,
-       /* 0008 */ 0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4
-};
+static const guid_t crb_acpi_start_guid =
+       GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
+                 0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4);
 
 enum crb_defaults {
        CRB_ACPI_START_REVISION_ID = 1,
@@ -266,7 +265,7 @@ static int crb_do_acpi_start(struct tpm_chip *chip)
        int rc;
 
        obj = acpi_evaluate_dsm(chip->acpi_dev_handle,
-                               CRB_ACPI_START_UUID,
+                               &crb_acpi_start_guid,
                                CRB_ACPI_START_REVISION_ID,
                                CRB_ACPI_START_INDEX,
                                NULL);
index 692a2c6ae036635da19ce4427aa86aacf6bef8d5..86dd8521feef5e2d2ee0ef6392ea3a0d13875d39 100644 (file)
 #define PPI_VS_REQ_START       128
 #define PPI_VS_REQ_END         255
 
-static const u8 tpm_ppi_uuid[] = {
-       0xA6, 0xFA, 0xDD, 0x3D,
-       0x1B, 0x36,
-       0xB4, 0x4E,
-       0xA4, 0x24,
-       0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53
-};
+static const guid_t tpm_ppi_guid =
+       GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
+                 0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
 
 static inline union acpi_object *
 tpm_eval_dsm(acpi_handle ppi_handle, int func, acpi_object_type type,
             union acpi_object *argv4)
 {
        BUG_ON(!ppi_handle);
-       return acpi_evaluate_dsm_typed(ppi_handle, tpm_ppi_uuid,
+       return acpi_evaluate_dsm_typed(ppi_handle, &tpm_ppi_guid,
                                       TPM_PPI_REVISION_ID,
                                       func, argv4, type);
 }
@@ -107,7 +103,7 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
         * is updated with function index from SUBREQ to SUBREQ2 since PPI
         * version 1.1
         */
-       if (acpi_check_dsm(chip->acpi_dev_handle, tpm_ppi_uuid,
+       if (acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
                           TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_SUBREQ2))
                func = TPM_PPI_FN_SUBREQ2;
 
@@ -268,7 +264,7 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
                "User not required",
        };
 
-       if (!acpi_check_dsm(dev_handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
+       if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID,
                            1 << TPM_PPI_FN_GETOPR))
                return -EPERM;
 
@@ -341,12 +337,12 @@ void tpm_add_ppi(struct tpm_chip *chip)
        if (!chip->acpi_dev_handle)
                return;
 
-       if (!acpi_check_dsm(chip->acpi_dev_handle, tpm_ppi_uuid,
+       if (!acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
                            TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_VERSION))
                return;
 
        /* Cache PPI version string. */
-       obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, tpm_ppi_uuid,
+       obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, &tpm_ppi_guid,
                                      TPM_PPI_REVISION_ID, TPM_PPI_FN_VERSION,
                                      NULL, ACPI_TYPE_STRING);
        if (obj) {
index 19480bcc704630bc2e7e084fb4cb586f0ac3c254..2f29ee1a4d005422a087de38efc3b5274b8f7a04 100644 (file)
@@ -14,6 +14,7 @@ config COMMON_CLK_MESON8B
 config COMMON_CLK_GXBB
        bool
        depends on COMMON_CLK_AMLOGIC
+       select RESET_CONTROLLER
        help
          Support for the clock controller on AmLogic S905 devices, aka gxbb.
          Say Y if you want peripherals and CPU frequency scaling to work.
index b0d551a8efe4d6d31114bc4f2cb3347173606cd9..eb89c7801f001b4a0ea0fde521fe695a298036b9 100644 (file)
@@ -156,6 +156,7 @@ config SUN8I_R_CCU
        bool "Support for Allwinner SoCs' PRCM CCUs"
        select SUNXI_CCU_DIV
        select SUNXI_CCU_GATE
+       select SUNXI_CCU_MP
        default MACH_SUN8I || (ARCH_SUNXI && ARM64)
 
 endif
index 9b3cd24b78d2326a06672555555f9752ebcf6b78..061b6fbb4f9591c0b77a54e8aa6a4ac0bb993e66 100644 (file)
@@ -31,7 +31,9 @@
 #define CLK_PLL_VIDEO0_2X              8
 #define CLK_PLL_VE                     9
 #define CLK_PLL_DDR0                   10
-#define CLK_PLL_PERIPH0                        11
+
+/* PLL_PERIPH0 exported for PRCM */
+
 #define CLK_PLL_PERIPH0_2X             12
 #define CLK_PLL_PERIPH1                        13
 #define CLK_PLL_PERIPH1_2X             14
index 5c476f966a7220c468799f5011b9994eb23c3ad4..5372bf8be5e6fb8b0a43185279d33cc1cc30bd3b 100644 (file)
@@ -243,7 +243,7 @@ static SUNXI_CCU_GATE(ahb_ss_clk,   "ahb-ss",       "ahb",
 static SUNXI_CCU_GATE(ahb_dma_clk,     "ahb-dma",      "ahb",
                      0x060, BIT(6), 0);
 static SUNXI_CCU_GATE(ahb_bist_clk,    "ahb-bist",     "ahb",
-                     0x060, BIT(6), 0);
+                     0x060, BIT(7), 0);
 static SUNXI_CCU_GATE(ahb_mmc0_clk,    "ahb-mmc0",     "ahb",
                      0x060, BIT(8), 0);
 static SUNXI_CCU_GATE(ahb_mmc1_clk,    "ahb-mmc1",     "ahb",
index 89e68d29bf456ab3d682f7d2ba7d35ad4a21bb58..df97e25aec76b505ddb88ab7d4b3dc76475fb035 100644 (file)
@@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents,
                                 0x12c, 0, 4, 24, 3, BIT(31),
                                 CLK_SET_RATE_PARENT);
 static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents,
-                                0x12c, 0, 4, 24, 3, BIT(31),
+                                0x130, 0, 4, 24, 3, BIT(31),
                                 CLK_SET_RATE_PARENT);
 
 static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",
index 85973d1e8165f9a085d9e574cc1f822430802c68..1b4baea37d810351d541d0b0a91d8a78a8524a95 100644 (file)
@@ -29,7 +29,9 @@
 #define CLK_PLL_VIDEO          6
 #define CLK_PLL_VE             7
 #define CLK_PLL_DDR            8
-#define CLK_PLL_PERIPH0                9
+
+/* PLL_PERIPH0 exported for PRCM */
+
 #define CLK_PLL_PERIPH0_2X     10
 #define CLK_PLL_GPU            11
 #define CLK_PLL_PERIPH1                12
index e58706b40ae98281d281b117f447e07c15546444..6297add857b53112a851f9bc0f1205db5996fe1b 100644 (file)
@@ -537,7 +537,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
        [RST_BUS_EMAC]          =  { 0x2c0, BIT(17) },
        [RST_BUS_HSTIMER]       =  { 0x2c0, BIT(19) },
        [RST_BUS_SPI0]          =  { 0x2c0, BIT(20) },
-       [RST_BUS_OTG]           =  { 0x2c0, BIT(23) },
+       [RST_BUS_OTG]           =  { 0x2c0, BIT(24) },
        [RST_BUS_EHCI0]         =  { 0x2c0, BIT(26) },
        [RST_BUS_OHCI0]         =  { 0x2c0, BIT(29) },
 
index 4bed671e490e0b15d79fd432f3d85dacfd094a96..8b5c30062d995968cc83e6c0511bcccab721873c 100644 (file)
@@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
                return 0;
        }
 
-       rate = readl_relaxed(frame + CNTFRQ);
+       rate = readl_relaxed(base + CNTFRQ);
 
-       iounmap(frame);
+       iounmap(base);
 
        return rate;
 }
index 44e5e951583bc38fc8c4a6a9b89ea91f7587af34..8e64b8460f113f56e829f69a414e229fea3c722a 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/clk.h>
 #include <linux/interrupt.h>
 #include <linux/clockchips.h>
+#include <linux/clocksource.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/slab.h>
index 2e9c830ae1cd52d61e38dbb3620b2eb53dd4eb7e..c4656c4d44a6715a25b0b9ddaf432451f31ad32c 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/clk.h>
 #include <linux/clockchips.h>
+#include <linux/clocksource.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
index e82bb3c30b923c085c981bf517d689b414646c09..10be285c9055791d7e54927da62666c394dc6dc0 100644 (file)
@@ -144,10 +144,23 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
        cppc_dmi_max_khz = cppc_get_dmi_max_khz();
 
-       policy->min = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz / cpu->perf_caps.highest_perf;
+       /*
+        * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
+        * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
+        */
+       policy->min = cpu->perf_caps.lowest_nonlinear_perf * cppc_dmi_max_khz /
+               cpu->perf_caps.highest_perf;
        policy->max = cppc_dmi_max_khz;
-       policy->cpuinfo.min_freq = policy->min;
-       policy->cpuinfo.max_freq = policy->max;
+
+       /*
+        * Set cpuinfo.min_freq to Lowest to make the full range of performance
+        * available if userspace wants to use any perf between lowest & lowest
+        * nonlinear perf
+        */
+       policy->cpuinfo.min_freq = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz /
+               cpu->perf_caps.highest_perf;
+       policy->cpuinfo.max_freq = cppc_dmi_max_khz;
+
        policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
        policy->shared_type = cpu->shared_type;
 
index 921b4a6c3d16bece3177b1e407883c8df9bcfa4a..1c262923fe588256e4ee1c48212f2742340ebf4a 100644 (file)
@@ -31,6 +31,7 @@ static const struct of_device_id machines[] __initconst = {
        { .compatible = "arm,integrator-ap", },
        { .compatible = "arm,integrator-cp", },
 
+       { .compatible = "hisilicon,hi3660", },
        { .compatible = "hisilicon,hi6220", },
 
        { .compatible = "fsl,imx27", },
index 9180d34cc9fce09be6255559e8d85b03be5c35a6..b6b369c2227265640b606fbf4c9d8a4c43dda1db 100644 (file)
@@ -173,12 +173,12 @@ static void exynos_enable_dvfs(unsigned int cur_frequency)
        /* Enable PSTATE Change Event */
        tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
        tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
-        __raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
+       __raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
 
        /* Enable PSTATE Change IRQ */
        tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
        tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
-        __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
+       __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
 
        /* Set initial performance index */
        cpufreq_for_each_entry(pos, freq_table)
@@ -330,7 +330,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
        struct resource res;
        unsigned int cur_frequency;
 
-       np =  pdev->dev.of_node;
+       np = pdev->dev.of_node;
        if (!np)
                return -ENODEV;
 
index 9c13f097fd8c9a96adba12cccc8e7eda523a57e3..b6edd3ccaa55b3e59c272b580796193f266f9ed4 100644 (file)
@@ -101,7 +101,8 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
         *  - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
         *  - Disable pll2_pfd2_396m_clk
         */
-       if (of_machine_is_compatible("fsl,imx6ul")) {
+       if (of_machine_is_compatible("fsl,imx6ul") ||
+           of_machine_is_compatible("fsl,imx6ull")) {
                /*
                 * When changing pll1_sw_clk's parent to pll1_sys_clk,
                 * CPU may run at higher than 528MHz, this will lead to
@@ -215,7 +216,8 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
                goto put_clk;
        }
 
-       if (of_machine_is_compatible("fsl,imx6ul")) {
+       if (of_machine_is_compatible("fsl,imx6ul") ||
+           of_machine_is_compatible("fsl,imx6ull")) {
                pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
                secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
                if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
index 75648325183275ecc2f3b58809b6ac7b5b0df30b..48a98f11a84ee79b9ccabcff6c6865a0c51a6e26 100644 (file)
@@ -651,6 +651,12 @@ static const char * const energy_perf_strings[] = {
        "power",
        NULL
 };
+static const unsigned int epp_values[] = {
+       HWP_EPP_PERFORMANCE,
+       HWP_EPP_BALANCE_PERFORMANCE,
+       HWP_EPP_BALANCE_POWERSAVE,
+       HWP_EPP_POWERSAVE
+};
 
 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
 {
@@ -662,17 +668,14 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
                return epp;
 
        if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
-               /*
-                * Range:
-                *      0x00-0x3F       :       Performance
-                *      0x40-0x7F       :       Balance performance
-                *      0x80-0xBF       :       Balance power
-                *      0xC0-0xFF       :       Power
-                * The EPP is a 8 bit value, but our ranges restrict the
-                * value which can be set. Here only using top two bits
-                * effectively.
-                */
-               index = (epp >> 6) + 1;
+               if (epp == HWP_EPP_PERFORMANCE)
+                       return 1;
+               if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
+                       return 2;
+               if (epp <= HWP_EPP_BALANCE_POWERSAVE)
+                       return 3;
+               else
+                       return 4;
        } else if (static_cpu_has(X86_FEATURE_EPB)) {
                /*
                 * Range:
@@ -710,15 +713,8 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
 
                value &= ~GENMASK_ULL(31, 24);
 
-               /*
-                * If epp is not default, convert from index into
-                * energy_perf_strings to epp value, by shifting 6
-                * bits left to use only top two bits in epp.
-                * The resultant epp need to shifted by 24 bits to
-                * epp position in MSR_HWP_REQUEST.
-                */
                if (epp == -EINVAL)
-                       epp = (pref_index - 1) << 6;
+                       epp = epp_values[pref_index - 1];
 
                value |= (u64)epp << 24;
                ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
index 992ce6f9abecdacf256e9d61c16a9e0d6742b767..3779742f86e3c08efbd9f18479ad61432f35d21e 100644 (file)
@@ -24,7 +24,7 @@
 
 #include <asm/msr.h>
 
-struct cpufreq_frequency_table *freq_table;
+static struct cpufreq_frequency_table *freq_table;
 static struct sfi_freq_table_entry *sfi_cpufreq_array;
 static int num_freq_table_entries;
 
index 21340e0be73e7c045dcf878a7ad759713ae026e9..f52144808455b28766e12cde86cdc557aeb5ebce 100644 (file)
@@ -4,6 +4,7 @@
 config ARM_CPUIDLE
         bool "Generic ARM/ARM64 CPU idle Driver"
         select DT_IDLE_STATES
+       select CPU_IDLE_MULTIPLE_DRIVERS
         help
           Select this to enable generic cpuidle driver for ARM.
           It provides a generic idle driver whose idle states are configured
index f440d385ed3471a3abd0ff5ce63d133c5f46785f..7080c384ad5de656e8c345a336167624b236a2bb 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/slab.h>
+#include <linux/topology.h>
 
 #include <asm/cpuidle.h>
 
@@ -44,7 +45,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
        return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
 }
 
-static struct cpuidle_driver arm_idle_driver = {
+static struct cpuidle_driver arm_idle_driver __initdata = {
        .name = "arm_idle",
        .owner = THIS_MODULE,
        /*
@@ -80,30 +81,42 @@ static const struct of_device_id arm_idle_state_match[] __initconst = {
 static int __init arm_idle_init(void)
 {
        int cpu, ret;
-       struct cpuidle_driver *drv = &arm_idle_driver;
+       struct cpuidle_driver *drv;
        struct cpuidle_device *dev;
 
-       /*
-        * Initialize idle states data, starting at index 1.
-        * This driver is DT only, if no DT idle states are detected (ret == 0)
-        * let the driver initialization fail accordingly since there is no
-        * reason to initialize the idle driver if only wfi is supported.
-        */
-       ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
-       if (ret <= 0)
-               return ret ? : -ENODEV;
-
-       ret = cpuidle_register_driver(drv);
-       if (ret) {
-               pr_err("Failed to register cpuidle driver\n");
-               return ret;
-       }
-
-       /*
-        * Call arch CPU operations in order to initialize
-        * idle states suspend back-end specific data
-        */
        for_each_possible_cpu(cpu) {
+
+               drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
+               if (!drv) {
+                       ret = -ENOMEM;
+                       goto out_fail;
+               }
+
+               drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+               /*
+                * Initialize idle states data, starting at index 1.  This
+                * driver is DT only, if no DT idle states are detected (ret
+                * == 0) let the driver initialization fail accordingly since
+                * there is no reason to initialize the idle driver if only
+                * wfi is supported.
+                */
+               ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
+               if (ret <= 0) {
+                       ret = ret ? : -ENODEV;
+                       goto out_fail;
+               }
+
+               ret = cpuidle_register_driver(drv);
+               if (ret) {
+                       pr_err("Failed to register cpuidle driver\n");
+                       goto out_fail;
+               }
+
+               /*
+                * Call arch CPU operations in order to initialize
+                * idle states suspend back-end specific data
+                */
                ret = arm_cpuidle_init(cpu);
 
                /*
@@ -141,10 +154,11 @@ out_fail:
                dev = per_cpu(cpuidle_devices, cpu);
                cpuidle_unregister_device(dev);
                kfree(dev);
+               drv = cpuidle_get_driver();
+               cpuidle_unregister_driver(drv);
+               kfree(drv);
        }
 
-       cpuidle_unregister_driver(drv);
-
        return ret;
 }
 device_initcall(arm_idle_init);
index b2330fd69e3464bbb5713a6b5dceadeba3421f0d..61b64c2b2cb869d9e6c494af5b6878471f2ce5ac 100644 (file)
@@ -286,6 +286,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
        struct device *device = get_cpu_device(dev->cpu);
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
        int i;
+       int first_idx;
+       int idx;
        unsigned int interactivity_req;
        unsigned int expected_interval;
        unsigned long nr_iowaiters, cpu_load;
@@ -335,11 +337,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                if (data->next_timer_us > polling_threshold &&
                    latency_req > s->exit_latency && !s->disabled &&
                    !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
-                       data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+                       first_idx = CPUIDLE_DRIVER_STATE_START;
                else
-                       data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
+                       first_idx = CPUIDLE_DRIVER_STATE_START - 1;
        } else {
-               data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+               first_idx = 0;
        }
 
        /*
@@ -359,20 +361,28 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * Find the idle state with the lowest power while satisfying
         * our constraints.
         */
-       for (i = data->last_state_idx + 1; i < drv->state_count; i++) {
+       idx = -1;
+       for (i = first_idx; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
                struct cpuidle_state_usage *su = &dev->states_usage[i];
 
                if (s->disabled || su->disable)
                        continue;
+               if (idx == -1)
+                       idx = i; /* first enabled state */
                if (s->target_residency > data->predicted_us)
                        break;
                if (s->exit_latency > latency_req)
                        break;
 
-               data->last_state_idx = i;
+               idx = i;
        }
 
+       if (idx == -1)
+               idx = 0; /* No states enabled. Must use 0. */
+
+       data->last_state_idx = idx;
+
        return data->last_state_idx;
 }
 
index 5104b63981390adb878ed27f4ca2d0d758c65307..c83ea68be792df45a354f38dee2438a866a1f29d 100644 (file)
@@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
        u32 set;
 
        if (!of_device_is_compatible(mvchip->chip.of_node,
-                                    "marvell,armada-370-xp-gpio"))
+                                    "marvell,armada-370-gpio"))
                return 0;
 
        if (IS_ERR(mvchip->clk))
@@ -852,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
                .data       = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
        },
        {
-               .compatible = "marvell,armada-370-xp-gpio",
+               .compatible = "marvell,armada-370-gpio",
                .data       = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
        },
        {
@@ -1128,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
                                                 mvchip);
        }
 
-       /* Armada 370/XP has simple PWM support for GPIO lines */
+       /* Some MVEBU SoCs have simple PWM support for GPIO lines */
        if (IS_ENABLED(CONFIG_PWM))
                return mvebu_pwm_probe(pdev, mvchip, id);
 
index 1cf78f4dd339f93ddd971088ec42a5146b9820fe..1e8e1123ddf416f18176cbc6e82fa791b3df9fb5 100644 (file)
@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
                        DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
                                 adev->clock.default_dispclk / 100);
                        adev->clock.default_dispclk = 60000;
+               } else if (adev->clock.default_dispclk <= 60000) {
+                       DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
+                                adev->clock.default_dispclk / 100);
+                       adev->clock.default_dispclk = 62500;
                }
                adev->clock.dp_extclk =
                        le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
index f2d705e6a75aa4f092d3d98ff739927e15b6f26b..ab6b0d0febab810ba4941e5a527435dd5d3161d8 100644 (file)
@@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        /* Vega 10 */
        {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
index 8c9bc75a9c2db63288f2c6765b02f68f63875194..8a0818b23ea40fadde57f6b469b2153330b710ab 100644 (file)
@@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-       ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+       ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
 
        memset(&args, 0, sizeof(args));
 
@@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
 void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
 {
        int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-       ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+       ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
 
        memset(&args, 0, sizeof(args));
 
index 9f847615ac74ab012f6203a141627a5c6f5993e2..48ca2457df8c964977f3f7edae0980bc227b97cf 100644 (file)
@@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
        if (!connector)
                return -ENOENT;
 
-       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-       encoder = drm_connector_get_encoder(connector);
-       if (encoder)
-               out_resp->encoder_id = encoder->base.id;
-       else
-               out_resp->encoder_id = 0;
-
-       ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
-                       (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
-                       (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
-                       &out_resp->count_props);
-       drm_modeset_unlock(&dev->mode_config.connection_mutex);
-       if (ret)
-               goto out_unref;
-
        for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
                if (connector->encoder_ids[i] != 0)
                        encoders_count++;
@@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
                                if (put_user(connector->encoder_ids[i],
                                             encoder_ptr + copied)) {
                                        ret = -EFAULT;
-                                       goto out_unref;
+                                       goto out;
                                }
                                copied++;
                        }
@@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
                        if (copy_to_user(mode_ptr + copied,
                                         &u_mode, sizeof(u_mode))) {
                                ret = -EFAULT;
+                               mutex_unlock(&dev->mode_config.mutex);
+
                                goto out;
                        }
                        copied++;
                }
        }
        out_resp->count_modes = mode_count;
-out:
        mutex_unlock(&dev->mode_config.mutex);
-out_unref:
+
+       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+       encoder = drm_connector_get_encoder(connector);
+       if (encoder)
+               out_resp->encoder_id = encoder->base.id;
+       else
+               out_resp->encoder_id = 0;
+
+       /* Only grab properties after probing, to make sure EDID and other
+        * properties reflect the latest status. */
+       ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+                       (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+                       (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+                       &out_resp->count_props);
+       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+out:
        drm_connector_put(connector);
 
        return ret;
index 462031cbd77f714b23a3b7645039c0d8dba71f40..615f0a855222f630d07311c92dce17d3bd371298 100644 (file)
@@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        struct page *page;
        unsigned long last_pfn = 0;     /* suppress gcc warning */
        unsigned int max_segment;
+       gfp_t noreclaim;
        int ret;
-       gfp_t gfp;
 
        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2315,22 +2315,31 @@ rebuild_st:
         * Fail silently without starting the shrinker
         */
        mapping = obj->base.filp->f_mapping;
-       gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
-       gfp |= __GFP_NORETRY | __GFP_NOWARN;
+       noreclaim = mapping_gfp_constraint(mapping,
+                                          ~(__GFP_IO | __GFP_RECLAIM));
+       noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
+
        sg = st->sgl;
        st->nents = 0;
        for (i = 0; i < page_count; i++) {
-               page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-               if (unlikely(IS_ERR(page))) {
-                       i915_gem_shrink(dev_priv,
-                                       page_count,
-                                       I915_SHRINK_BOUND |
-                                       I915_SHRINK_UNBOUND |
-                                       I915_SHRINK_PURGEABLE);
+               const unsigned int shrink[] = {
+                       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
+                       0,
+               }, *s = shrink;
+               gfp_t gfp = noreclaim;
+
+               do {
                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-               }
-               if (unlikely(IS_ERR(page))) {
-                       gfp_t reclaim;
+                       if (likely(!IS_ERR(page)))
+                               break;
+
+                       if (!*s) {
+                               ret = PTR_ERR(page);
+                               goto err_sg;
+                       }
+
+                       i915_gem_shrink(dev_priv, 2 * page_count, *s++);
+                       cond_resched();
 
                        /* We've tried hard to allocate the memory by reaping
                         * our own buffer, now let the real VM do its job and
@@ -2340,15 +2349,26 @@ rebuild_st:
                         * defer the oom here by reporting the ENOMEM back
                         * to userspace.
                         */
-                       reclaim = mapping_gfp_mask(mapping);
-                       reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
-
-                       page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
-                       if (IS_ERR(page)) {
-                               ret = PTR_ERR(page);
-                               goto err_sg;
+                       if (!*s) {
+                               /* reclaim and warn, but no oom */
+                               gfp = mapping_gfp_mask(mapping);
+
+                               /* Our bo are always dirty and so we require
+                                * kswapd to reclaim our pages (direct reclaim
+                                * does not effectively begin pageout of our
+                                * buffers on its own). However, direct reclaim
+                                * only waits for kswapd when under allocation
+                                * congestion. So as a result __GFP_RECLAIM is
+                                * unreliable and fails to actually reclaim our
+                                * dirty pages -- unless you try over and over
+                                * again with !__GFP_NORETRY. However, we still
+                                * want to fail this allocation rather than
+                                * trigger the out-of-memory killer and for
+                                * this we want the future __GFP_MAYFAIL.
+                                */
                        }
-               }
+               } while (1);
+
                if (!i ||
                    sg->length >= max_segment ||
                    page_to_pfn(page) != last_pfn + 1) {
@@ -4222,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
 
        mapping = obj->base.filp->f_mapping;
        mapping_set_gfp_mask(mapping, mask);
+       GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
 
        i915_gem_object_init(obj, &i915_gem_object_ops);
 
index 5ddbc94997751adf5c9f04f7dd4a37a74d70de24..a74d0ac737cbeb7f9b9c5e93ea712a396e3c09d5 100644 (file)
@@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
-       req->head = req->ring->tail;
+       req->head = req->ring->emit;
 
        /* Check that we didn't interrupt ourselves with a new request */
        GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
index 1642fff9cf135d5edbe85864d1b327d59002c026..ab5140ba108ddcb2c9c5382cc6439223704f9fda 100644 (file)
@@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client,
        GEM_BUG_ON(freespace < wqi_size);
 
        /* The GuC firmware wants the tail index in QWords, not bytes */
-       tail = rq->tail;
-       assert_ring_tail_valid(rq->ring, rq->tail);
-       tail >>= 3;
+       tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
        GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
 
        /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
index eb638a1e69d20c985263398e89928d218d30531e..42fb436f6cdc9dc410681d4af0d5f4148b58dc07 100644 (file)
@@ -15,13 +15,9 @@ static struct intel_dsm_priv {
        acpi_handle dhandle;
 } intel_dsm_priv;
 
-static const u8 intel_dsm_guid[] = {
-       0xd3, 0x73, 0xd8, 0x7e,
-       0xd0, 0xc2,
-       0x4f, 0x4e,
-       0xa8, 0x54,
-       0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
-};
+static const guid_t intel_dsm_guid =
+       GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
+                 0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
 
 static char *intel_dsm_port_name(u8 id)
 {
@@ -80,7 +76,7 @@ static void intel_dsm_platform_mux_info(void)
        int i;
        union acpi_object *pkg, *connector_count;
 
-       pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, intel_dsm_guid,
+       pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, &intel_dsm_guid,
                        INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
                        NULL, ACPI_TYPE_PACKAGE);
        if (!pkg) {
@@ -118,7 +114,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
        if (!dhandle)
                return false;
 
-       if (!acpi_check_dsm(dhandle, intel_dsm_guid, INTEL_DSM_REVISION_ID,
+       if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,
                            1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
                DRM_DEBUG_KMS("no _DSM method for intel device\n");
                return false;
index 96b0b01677e26b22f382868f4b8b4c6dd738a4b3..9106ea32b048cac4783ae316d7cc198a4bf8ae88 100644 (file)
@@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
 static void skylake_pfit_enable(struct intel_crtc *crtc);
 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
 static void ironlake_pfit_enable(struct intel_crtc *crtc);
-static void intel_modeset_setup_hw_state(struct drm_device *dev);
+static void intel_modeset_setup_hw_state(struct drm_device *dev,
+                                        struct drm_modeset_acquire_ctx *ctx);
 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
 
 struct intel_limit {
@@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev,
        struct drm_crtc *crtc;
        int i, ret;
 
-       intel_modeset_setup_hw_state(dev);
+       intel_modeset_setup_hw_state(dev, ctx);
        i915_redisable_vga(to_i915(dev));
 
        if (!state)
@@ -5825,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
                intel_update_watermarks(intel_crtc);
 }
 
-static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
+                                       struct drm_modeset_acquire_ctx *ctx)
 {
        struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5855,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
                return;
        }
 
-       state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+       state->acquire_ctx = ctx;
 
        /* Everything's already locked, -EDEADLK can't happen. */
        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
@@ -15030,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev)
        intel_setup_outputs(dev_priv);
 
        drm_modeset_lock_all(dev);
-       intel_modeset_setup_hw_state(dev);
+       intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
        drm_modeset_unlock_all(dev);
 
        for_each_intel_crtc(dev, crtc) {
@@ -15067,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev)
        return 0;
 }
 
-static void intel_enable_pipe_a(struct drm_device *dev)
+static void intel_enable_pipe_a(struct drm_device *dev,
+                               struct drm_modeset_acquire_ctx *ctx)
 {
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct drm_connector *crt = NULL;
        struct intel_load_detect_pipe load_detect_temp;
-       struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
        int ret;
 
        /* We can't just switch on the pipe A, we need to set things up with a
@@ -15145,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
                (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
 }
 
-static void intel_sanitize_crtc(struct intel_crtc *crtc)
+static void intel_sanitize_crtc(struct intel_crtc *crtc,
+                               struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -15191,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                plane = crtc->plane;
                crtc->base.primary->state->visible = true;
                crtc->plane = !plane;
-               intel_crtc_disable_noatomic(&crtc->base);
+               intel_crtc_disable_noatomic(&crtc->base, ctx);
                crtc->plane = plane;
        }
 
@@ -15201,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                 * resume. Force-enable the pipe to fix this, the update_dpms
                 * call below we restore the pipe to the right state, but leave
                 * the required bits on. */
-               intel_enable_pipe_a(dev);
+               intel_enable_pipe_a(dev, ctx);
        }
 
        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        if (crtc->active && !intel_crtc_has_encoders(crtc))
-               intel_crtc_disable_noatomic(&crtc->base);
+               intel_crtc_disable_noatomic(&crtc->base, ctx);
 
        if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
                /*
@@ -15505,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
  * and sanitizes it to the current state
  */
 static void
-intel_modeset_setup_hw_state(struct drm_device *dev)
+intel_modeset_setup_hw_state(struct drm_device *dev,
+                            struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
@@ -15525,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
        for_each_pipe(dev_priv, pipe) {
                crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 
-               intel_sanitize_crtc(crtc);
+               intel_sanitize_crtc(crtc, ctx);
                intel_dump_pipe_config(crtc, crtc->config,
                                       "[setup_hw_state]");
        }
index 6532e226db29b63da766a8571de231de4f7261f6..40ba3134545ef7e339c5bfe347501eeb7715dac0 100644 (file)
@@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
        struct intel_panel *panel = &connector->panel;
 
-       intel_dp_aux_enable_backlight(connector);
-
        if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
                panel->backlight.max = 0xFFFF;
        else
index dac4e003c1f317ec402110132bad0c3a734bf52a..62f44d3e7c43c0d90df093050d5af6d3d68fe3a3 100644 (file)
@@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
                rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
        u32 *reg_state = ce->lrc_reg_state;
 
-       assert_ring_tail_valid(rq->ring, rq->tail);
-       reg_state[CTX_RING_TAIL+1] = rq->tail;
+       reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
 
        /* True 32b PPGTT with dynamic page allocation: update PDP
         * registers and point the unallocated PDPs to scratch page.
@@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
                        ce->state->obj->mm.dirty = true;
                        i915_gem_object_unpin_map(ce->state->obj);
 
-                       ce->ring->head = ce->ring->tail = 0;
-                       intel_ring_update_space(ce->ring);
+                       intel_ring_reset(ce->ring, 0);
                }
        }
 }
index 66a2b8b83972691d04f2737337e7ea6cf6a72851..513a0f4b469b32c9d0ac2e87c089bb6f2e4907ba 100644 (file)
@@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
 
 void intel_ring_update_space(struct intel_ring *ring)
 {
-       ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
+       ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
 }
 
 static int
@@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
 
        i915_gem_request_submit(request);
 
-       assert_ring_tail_valid(request->ring, request->tail);
-       I915_WRITE_TAIL(request->engine, request->tail);
+       I915_WRITE_TAIL(request->engine,
+                       intel_ring_set_tail(request->ring, request->tail));
 }
 
 static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
@@ -1316,11 +1316,23 @@ err:
        return PTR_ERR(addr);
 }
 
+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+       GEM_BUG_ON(!list_empty(&ring->request_list));
+       ring->tail = tail;
+       ring->head = tail;
+       ring->emit = tail;
+       intel_ring_update_space(ring);
+}
+
 void intel_ring_unpin(struct intel_ring *ring)
 {
        GEM_BUG_ON(!ring->vma);
        GEM_BUG_ON(!ring->vaddr);
 
+       /* Discard any unused bytes beyond that submitted to hw. */
+       intel_ring_reset(ring, ring->tail);
+
        if (i915_vma_is_map_and_fenceable(ring->vma))
                i915_vma_unpin_iomap(ring->vma);
        else
@@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
+       /* Restart from the beginning of the rings for convenience */
        for_each_engine(engine, dev_priv, id)
-               engine->buffer->head = engine->buffer->tail;
+               intel_ring_reset(engine->buffer, 0);
 }
 
 static int ring_request_alloc(struct drm_i915_gem_request *request)
@@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
                unsigned space;
 
                /* Would completion of this request free enough space? */
-               space = __intel_ring_space(target->postfix, ring->tail,
+               space = __intel_ring_space(target->postfix, ring->emit,
                                           ring->size);
                if (space >= bytes)
                        break;
@@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
        struct intel_ring *ring = req->ring;
-       int remain_actual = ring->size - ring->tail;
-       int remain_usable = ring->effective_size - ring->tail;
+       int remain_actual = ring->size - ring->emit;
+       int remain_usable = ring->effective_size - ring->emit;
        int bytes = num_dwords * sizeof(u32);
        int total_bytes, wait_bytes;
        bool need_wrap = false;
@@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 
        if (unlikely(need_wrap)) {
                GEM_BUG_ON(remain_actual > ring->space);
-               GEM_BUG_ON(ring->tail + remain_actual > ring->size);
+               GEM_BUG_ON(ring->emit + remain_actual > ring->size);
 
                /* Fill the tail with MI_NOOP */
-               memset(ring->vaddr + ring->tail, 0, remain_actual);
-               ring->tail = 0;
+               memset(ring->vaddr + ring->emit, 0, remain_actual);
+               ring->emit = 0;
                ring->space -= remain_actual;
        }
 
-       GEM_BUG_ON(ring->tail > ring->size - bytes);
-       cs = ring->vaddr + ring->tail;
-       ring->tail += bytes;
+       GEM_BUG_ON(ring->emit > ring->size - bytes);
+       cs = ring->vaddr + ring->emit;
+       ring->emit += bytes;
        ring->space -= bytes;
        GEM_BUG_ON(ring->space < 0);
 
@@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
        int num_dwords =
-               (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+               (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
        u32 *cs;
 
        if (num_dwords == 0)
index a82a0807f64dbd0624728fe3c65215abe3647565..f7144fe0961347826c62e620af879bb4db9f0d77 100644 (file)
@@ -145,6 +145,7 @@ struct intel_ring {
 
        u32 head;
        u32 tail;
+       u32 emit;
 
        int space;
        int size;
@@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine, int size);
 int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
+void intel_ring_reset(struct intel_ring *ring, u32 tail);
+void intel_ring_update_space(struct intel_ring *ring);
 void intel_ring_unpin(struct intel_ring *ring);
 void intel_ring_free(struct intel_ring *ring);
 
@@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
         * reserved for the command packet (i.e. the value passed to
         * intel_ring_begin()).
         */
-       GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
+       GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
 }
 
 static inline u32
@@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
        GEM_BUG_ON(tail >= ring->size);
 }
 
-void intel_ring_update_space(struct intel_ring *ring);
+static inline unsigned int
+intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
+{
+       /* Whilst writes to the tail are strictly order, there is no
+        * serialisation between readers and the writers. The tail may be
+        * read by i915_gem_request_retire() just as it is being updated
+        * by execlists, as although the breadcrumb is complete, the context
+        * switch hasn't been seen.
+        */
+       assert_ring_tail_valid(ring, tail);
+       ring->tail = tail;
+       return tail;
+}
 
 void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
 
index 39468c2180277618caddceb0681d1bdd39f53140..7459ef9943ec10bb2925854e9a11378b2e093bf7 100644 (file)
@@ -60,15 +60,13 @@ bool nouveau_is_v1_dsm(void) {
 }
 
 #ifdef CONFIG_VGA_SWITCHEROO
-static const char nouveau_dsm_muid[] = {
-       0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
-       0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
-};
+static const guid_t nouveau_dsm_muid =
+       GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48,
+                 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4);
 
-static const char nouveau_op_dsm_muid[] = {
-       0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47,
-       0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0,
-};
+static const guid_t nouveau_op_dsm_muid =
+       GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B,
+                 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);
 
 static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
 {
@@ -86,7 +84,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
                args_buff[i] = (arg >> i * 8) & 0xFF;
 
        *result = 0;
-       obj = acpi_evaluate_dsm_typed(handle, nouveau_op_dsm_muid, 0x00000100,
+       obj = acpi_evaluate_dsm_typed(handle, &nouveau_op_dsm_muid, 0x00000100,
                                      func, &argv4, ACPI_TYPE_BUFFER);
        if (!obj) {
                acpi_handle_info(handle, "failed to evaluate _DSM\n");
@@ -138,7 +136,7 @@ static int nouveau_dsm(acpi_handle handle, int func, int arg)
                .integer.value = arg,
        };
 
-       obj = acpi_evaluate_dsm_typed(handle, nouveau_dsm_muid, 0x00000102,
+       obj = acpi_evaluate_dsm_typed(handle, &nouveau_dsm_muid, 0x00000102,
                                      func, &argv4, ACPI_TYPE_INTEGER);
        if (!obj) {
                acpi_handle_info(handle, "failed to evaluate _DSM\n");
@@ -259,7 +257,7 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
        if (!acpi_has_method(dhandle, "_DSM"))
                return;
 
-       supports_mux = acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
+       supports_mux = acpi_check_dsm(dhandle, &nouveau_dsm_muid, 0x00000102,
                                      1 << NOUVEAU_DSM_POWER);
        optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle);
 
index e3e2f5e838152bfc5f6f0eaa6b6b4fe1cbc15a84..f44682d62f750dcb5311505f67dc8691472e6463 100644 (file)
@@ -81,10 +81,9 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
 {
        struct nvkm_subdev *subdev = &mxm->subdev;
        struct nvkm_device *device = subdev->device;
-       static char muid[] = {
-               0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
-               0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
-       };
+       static guid_t muid =
+               GUID_INIT(0x4004A400, 0x917D, 0x4CF2,
+                         0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65);
        u32 mxms_args[] = { 0x00000000 };
        union acpi_object argv4 = {
                .buffer.type = ACPI_TYPE_BUFFER,
@@ -105,7 +104,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
         * unless you pass in exactly the version it supports..
         */
        rev = (version & 0xf0) << 4 | (version & 0x0f);
-       obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4);
+       obj = acpi_evaluate_dsm(handle, &muid, rev, 0x00000010, &argv4);
        if (!obj) {
                nvkm_debug(subdev, "DSM MXMS failed\n");
                return false;
index 432480ff9d228857d57170b3353c143bf0501c3f..3178ba0c537c1915af3b857aad83efd6371f17ad 100644 (file)
@@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
            rdev->pdev->subsystem_vendor == 0x103c &&
            rdev->pdev->subsystem_device == 0x280a)
                return;
+       /* quirk for rs4xx Toshiba Sattellite L20-183 latop to make it resume
+        * - it hangs on resume inside the dynclk 1 table.
+        */
+       if (rdev->family == CHIP_RS400 &&
+           rdev->pdev->subsystem_vendor == 0x1179 &&
+           rdev->pdev->subsystem_device == 0xff31)
+               return;
 
        /* DYN CLK 1 */
        table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
index 6ecf42783d4b0c45539325edd2e5ffd2fc08e29f..0a6444d72000c434a6b494229a75c1a7d3e41631 100644 (file)
@@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
         * https://bugzilla.kernel.org/show_bug.cgi?id=51381
         */
        { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+       /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+        * https://bugs.freedesktop.org/show_bug.cgi?id=101491
+        */
+       { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
        /* macbook pro 8.2 */
        { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
        { 0, 0, 0, 0, 0 },
index 8ca1e8ce0af24e325957526c125ccf55d9081eb8..4f9a3938189a020ca6257b85182e6c1b53426158 100644 (file)
 #define USB_VENDOR_ID_DELCOM           0x0fc5
 #define USB_DEVICE_ID_DELCOM_VISUAL_IND        0xb080
 
+#define USB_VENDOR_ID_DELL                             0x413c
+#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE    0x301a
+
 #define USB_VENDOR_ID_DELORME          0x1163
 #define USB_DEVICE_ID_DELORME_EARTHMATE        0x0100
 #define USB_DEVICE_ID_DELORME_EM_LT20  0x0200
index 1d6c997b300149269367d00fb5db66b7c2ea25b7..20b40ad2632503754685b84cc07d8787a4a44515 100644 (file)
@@ -349,7 +349,6 @@ static int magicmouse_raw_event(struct hid_device *hdev,
 
        if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
                magicmouse_emit_buttons(msc, clicks & 3);
-               input_mt_report_pointer_emulation(input, true);
                input_report_rel(input, REL_X, x);
                input_report_rel(input, REL_Y, y);
        } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@@ -389,16 +388,16 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
                __clear_bit(BTN_RIGHT, input->keybit);
                __clear_bit(BTN_MIDDLE, input->keybit);
                __set_bit(BTN_MOUSE, input->keybit);
+               __set_bit(BTN_TOOL_FINGER, input->keybit);
+               __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+               __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+               __set_bit(BTN_TOOL_QUADTAP, input->keybit);
+               __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
+               __set_bit(BTN_TOUCH, input->keybit);
+               __set_bit(INPUT_PROP_POINTER, input->propbit);
                __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
        }
 
-       __set_bit(BTN_TOOL_FINGER, input->keybit);
-       __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
-       __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
-       __set_bit(BTN_TOOL_QUADTAP, input->keybit);
-       __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
-       __set_bit(BTN_TOUCH, input->keybit);
-       __set_bit(INPUT_PROP_POINTER, input->propbit);
 
        __set_bit(EV_ABS, input->evbit);
 
index fb55fb4c39fcfecaca55c0b8720d28d2f9717678..04015032a35a204b10593f71faac6812b82e45e8 100644 (file)
@@ -872,10 +872,9 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
 static int i2c_hid_acpi_pdata(struct i2c_client *client,
                struct i2c_hid_platform_data *pdata)
 {
-       static u8 i2c_hid_guid[] = {
-               0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
-               0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
-       };
+       static guid_t i2c_hid_guid =
+               GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
+                         0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
        union acpi_object *obj;
        struct acpi_device *adev;
        acpi_handle handle;
@@ -884,7 +883,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
        if (!handle || acpi_bus_get_device(handle, &adev))
                return -ENODEV;
 
-       obj = acpi_evaluate_dsm_typed(handle, i2c_hid_guid, 1, 1, NULL,
+       obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
                                      ACPI_TYPE_INTEGER);
        if (!obj) {
                dev_err(&client->dev, "device _DSM execution failed\n");
index 6316498b78128574ff63b0047772f009e6d0cfeb..a88e7c7bea0a0cb7d069c262c026231f3bc75dea 100644 (file)
@@ -85,6 +85,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
index 95ed17183e73e904e06b13c383bee28410797172..54a47b40546f69c7ea0d3dbf033c22c95f106516 100644 (file)
@@ -734,9 +734,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
                 * the first read operation, otherwise the first read cost
                 * one extra clock cycle.
                 */
-               temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+               temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
                temp |= I2CR_MTX;
-               writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+               imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
        }
        msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
 
@@ -857,9 +857,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
                                 * the first read operation, otherwise the first read cost
                                 * one extra clock cycle.
                                 */
-                               temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+                               temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
                                temp |= I2CR_MTX;
-                               writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+                               imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
                        }
                } else if (i == (msgs->len - 2)) {
                        dev_dbg(&i2c_imx->adapter.dev,
index 216d7ec88c0c7d55eca7ef198a53fb754da3c182..c2ae819a871cb6d8f09412702e46463397f9fc0f 100644 (file)
@@ -51,6 +51,8 @@
 /* un-comment DEBUG to enable pr_debug() statements */
 #define DEBUG
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/cpuidle.h>
 #include <linux/tick.h>
@@ -65,7 +67,6 @@
 #include <asm/msr.h>
 
 #define INTEL_IDLE_VERSION "0.4.1"
-#define PREFIX "intel_idle: "
 
 static struct cpuidle_driver intel_idle_driver = {
        .name = "intel_idle",
@@ -1111,7 +1112,7 @@ static int __init intel_idle_probe(void)
        const struct x86_cpu_id *id;
 
        if (max_cstate == 0) {
-               pr_debug(PREFIX "disabled\n");
+               pr_debug("disabled\n");
                return -EPERM;
        }
 
@@ -1119,8 +1120,8 @@ static int __init intel_idle_probe(void)
        if (!id) {
                if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
                    boot_cpu_data.x86 == 6)
-                       pr_debug(PREFIX "does not run on family %d model %d\n",
-                               boot_cpu_data.x86, boot_cpu_data.x86_model);
+                       pr_debug("does not run on family %d model %d\n",
+                                boot_cpu_data.x86, boot_cpu_data.x86_model);
                return -ENODEV;
        }
 
@@ -1134,13 +1135,13 @@ static int __init intel_idle_probe(void)
            !mwait_substates)
                        return -ENODEV;
 
-       pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
+       pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
 
        icpu = (const struct idle_cpu *)id->driver_data;
        cpuidle_state_table = icpu->state_table;
 
-       pr_debug(PREFIX "v" INTEL_IDLE_VERSION
-               " model 0x%X\n", boot_cpu_data.x86_model);
+       pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
+                boot_cpu_data.x86_model);
 
        return 0;
 }
@@ -1340,8 +1341,7 @@ static void __init intel_idle_cpuidle_driver_init(void)
                        break;
 
                if (cstate + 1 > max_cstate) {
-                       printk(PREFIX "max_cstate %d reached\n",
-                               max_cstate);
+                       pr_info("max_cstate %d reached\n", max_cstate);
                        break;
                }
 
@@ -1358,8 +1358,8 @@ static void __init intel_idle_cpuidle_driver_init(void)
 
                /* if state marked as disabled, skip it */
                if (cpuidle_state_table[cstate].disabled != 0) {
-                       pr_debug(PREFIX "state %s is disabled",
-                               cpuidle_state_table[cstate].name);
+                       pr_debug("state %s is disabled\n",
+                                cpuidle_state_table[cstate].name);
                        continue;
                }
 
@@ -1395,7 +1395,7 @@ static int intel_idle_cpu_init(unsigned int cpu)
        dev->cpu = cpu;
 
        if (cpuidle_register_device(dev)) {
-               pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
+               pr_debug("cpuidle_register_device %d failed!\n", cpu);
                return -EIO;
        }
 
@@ -1447,8 +1447,8 @@ static int __init intel_idle_init(void)
        retval = cpuidle_register_driver(&intel_idle_driver);
        if (retval) {
                struct cpuidle_driver *drv = cpuidle_get_driver();
-               printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
-                       drv ? drv->name : "none");
+               printk(KERN_DEBUG pr_fmt("intel_idle yielding to %s\n"),
+                      drv ? drv->name : "none");
                goto init_driver_fail;
        }
 
@@ -1460,8 +1460,8 @@ static int __init intel_idle_init(void)
        if (retval < 0)
                goto hp_setup_fail;
 
-       pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
-               lapic_timer_reliable_states);
+       pr_debug("lapic_timer_reliable_states 0x%x\n",
+                lapic_timer_reliable_states);
 
        return 0;
 
index e37d37273182097d412f30a3878ea0303f9f7a5b..f600f3a7a3c685488e1ede36058439c59f0703dc 100644 (file)
@@ -248,7 +248,8 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev)
 
        if (!btns_desc) {
                dev_err(dev, "ACPI Button Descriptors not found\n");
-               return ERR_PTR(-ENODEV);
+               button_info = ERR_PTR(-ENODEV);
+               goto out;
        }
 
        /* The first package describes the collection */
@@ -264,24 +265,31 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev)
        }
        if (collection_uid == -1) {
                dev_err(dev, "Invalid Button Collection Descriptor\n");
-               return ERR_PTR(-ENODEV);
+               button_info = ERR_PTR(-ENODEV);
+               goto out;
        }
 
        /* There are package.count - 1 buttons + 1 terminating empty entry */
        button_info = devm_kcalloc(dev, btns_desc->package.count,
                                   sizeof(*button_info), GFP_KERNEL);
-       if (!button_info)
-               return ERR_PTR(-ENOMEM);
+       if (!button_info) {
+               button_info = ERR_PTR(-ENOMEM);
+               goto out;
+       }
 
        /* Parse the button descriptors */
        for (i = 1, btn = 0; i < btns_desc->package.count; i++, btn++) {
                if (soc_button_parse_btn_desc(dev,
                                              &btns_desc->package.elements[i],
                                              collection_uid,
-                                             &button_info[btn]))
-                       return ERR_PTR(-ENODEV);
+                                             &button_info[btn])) {
+                       button_info = ERR_PTR(-ENODEV);
+                       goto out;
+               }
        }
 
+out:
+       kfree(buf.pointer);
        return button_info;
 }
 
index dea63e2db3e6213f5e83d6067870a16cc5707e6d..f5206e2c767ebf3579c2468b2a2956cc4bff3dcc 100644 (file)
@@ -31,9 +31,6 @@
 #define F54_GET_REPORT          1
 #define F54_FORCE_CAL           2
 
-/* Fixed sizes of reports */
-#define F54_QUERY_LEN                  27
-
 /* F54 capabilities */
 #define F54_CAP_BASELINE       (1 << 2)
 #define F54_CAP_IMAGE8         (1 << 3)
@@ -95,7 +92,6 @@ struct rmi_f54_reports {
 struct f54_data {
        struct rmi_function *fn;
 
-       u8 qry[F54_QUERY_LEN];
        u8 num_rx_electrodes;
        u8 num_tx_electrodes;
        u8 capabilities;
@@ -632,22 +628,23 @@ static int rmi_f54_detect(struct rmi_function *fn)
 {
        int error;
        struct f54_data *f54;
+       u8 buf[6];
 
        f54 = dev_get_drvdata(&fn->dev);
 
        error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
-                              &f54->qry, sizeof(f54->qry));
+                              buf, sizeof(buf));
        if (error) {
                dev_err(&fn->dev, "%s: Failed to query F54 properties\n",
                        __func__);
                return error;
        }
 
-       f54->num_rx_electrodes = f54->qry[0];
-       f54->num_tx_electrodes = f54->qry[1];
-       f54->capabilities = f54->qry[2];
-       f54->clock_rate = f54->qry[3] | (f54->qry[4] << 8);
-       f54->family = f54->qry[5];
+       f54->num_rx_electrodes = buf[0];
+       f54->num_tx_electrodes = buf[1];
+       f54->capabilities = buf[2];
+       f54->clock_rate = buf[3] | (buf[4] << 8);
+       f54->family = buf[5];
 
        rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_rx_electrodes: %d\n",
                f54->num_rx_electrodes);
index 09720d950686c844b49f1d7f32710e160d21624a..f932a83b4990210d8daeb25c1d2482b958c3719e 100644 (file)
@@ -723,6 +723,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
                },
        },
+       {
+               /* Fujitsu UH554 laptop */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
+               },
+       },
        { }
 };
 
index cbf7763d8091035deb45d2c36a7fc7201e6b4ab0..c8b0329c85d2801e1cd99c74c9959f03ee1790d2 100644 (file)
@@ -1808,10 +1808,9 @@ IOMMU_INIT_POST(detect_intel_iommu);
  * for Directed-IO Architecture Specifiction, Rev 2.2, Section 8.8
  * "Remapping Hardware Unit Hot Plug".
  */
-static u8 dmar_hp_uuid[] = {
-       /* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
-       /* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
-};
+static guid_t dmar_hp_guid =
+       GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
+                 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
 
 /*
  * Currently there's only one revision and BIOS will not check the revision id,
@@ -1824,7 +1823,7 @@ static u8 dmar_hp_uuid[] = {
 
 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
 {
-       return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
+       return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
 }
 
 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
@@ -1843,7 +1842,7 @@ static int dmar_walk_dsm_resource(acpi_handle handle, int func,
        if (!dmar_detect_dsm(handle, func))
                return 0;
 
-       obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
+       obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
                                      func, NULL, ACPI_TYPE_BUFFER);
        if (!obj)
                return -ENODEV;
index eb7fbe15996304fc9eecca11a2b71b70bffd284d..929f8558bf1c0fe247d5c0ab99e40dfaaea094bd 100644 (file)
@@ -140,7 +140,7 @@ static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
 }
 
 #ifdef CONFIG_CLKSRC_MIPS_GIC
-u64 gic_read_count(void)
+u64 notrace gic_read_count(void)
 {
        unsigned int hi, hi2, lo;
 
@@ -167,7 +167,7 @@ unsigned int gic_get_count_width(void)
        return bits;
 }
 
-void gic_write_compare(u64 cnt)
+void notrace gic_write_compare(u64 cnt)
 {
        if (mips_cm_is64) {
                gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
@@ -179,7 +179,7 @@ void gic_write_compare(u64 cnt)
        }
 }
 
-void gic_write_cpu_compare(u64 cnt, int cpu)
+void notrace gic_write_cpu_compare(u64 cnt, int cpu)
 {
        unsigned long flags;
 
index 7910bfe50da4469c44b571363cc6696f74f5fa42..93b18108816813bd4d49e0ba3e6eb57be7ae3d9f 100644 (file)
@@ -1105,10 +1105,13 @@ static void schedule_autocommit(struct dm_integrity_c *ic)
 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
 {
        struct bio *bio;
-       spin_lock_irq(&ic->endio_wait.lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ic->endio_wait.lock, flags);
        bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
        bio_list_add(&ic->flush_bio_list, bio);
-       spin_unlock_irq(&ic->endio_wait.lock);
+       spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
+
        queue_work(ic->commit_wq, &ic->commit_work);
 }
 
@@ -3040,6 +3043,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                ti->error = "The device is too small";
                goto bad;
        }
+       if (ti->len > ic->provided_data_sectors) {
+               r = -EINVAL;
+               ti->error = "Not enough provided sectors for requested mapping size";
+               goto bad;
+       }
 
        if (!buffer_sectors)
                buffer_sectors = 1;
index 3702e502466d37a902c64a74a1f5ad7b516770bb..8d5ca30f655123611b5dcd2ba3d35b52e9c6c447 100644 (file)
@@ -317,8 +317,8 @@ static void do_region(int op, int op_flags, unsigned region,
        else if (op == REQ_OP_WRITE_SAME)
                special_cmd_max_sectors = q->limits.max_write_same_sectors;
        if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
-            op == REQ_OP_WRITE_SAME)  &&
-           special_cmd_max_sectors == 0) {
+            op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
+               atomic_inc(&io->count);
                dec_count(io, region, -EOPNOTSUPP);
                return;
        }
index e61c45047c25a9ba2683c313fbc2151c9051b178..4da8858856fb3019c16d5681c241f8733dca21ec 100644 (file)
@@ -145,6 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
 
 struct dm_raid1_bio_record {
        struct mirror *m;
+       /* if details->bi_bdev == NULL, details were not saved */
        struct dm_bio_details details;
        region_t write_region;
 };
@@ -1198,6 +1199,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
        struct dm_raid1_bio_record *bio_record =
          dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
 
+       bio_record->details.bi_bdev = NULL;
+
        if (rw == WRITE) {
                /* Save region for mirror_end_io() handler */
                bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1256,12 +1259,22 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
        }
 
        if (error == -EOPNOTSUPP)
-               return error;
+               goto out;
 
        if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
-               return error;
+               goto out;
 
        if (unlikely(error)) {
+               if (!bio_record->details.bi_bdev) {
+                       /*
+                        * There wasn't enough memory to record necessary
+                        * information for a retry or there was no other
+                        * mirror in-sync.
+                        */
+                       DMERR_LIMIT("Mirror read failed.");
+                       return -EIO;
+               }
+
                m = bio_record->m;
 
                DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1277,6 +1290,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                        bd = &bio_record->details;
 
                        dm_bio_restore(bd, bio);
+                       bio_record->details.bi_bdev = NULL;
                        bio->bi_error = 0;
 
                        queue_bio(ms, bio, rw);
@@ -1285,6 +1299,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                DMERR("All replicated volumes dead, failing I/O");
        }
 
+out:
+       bio_record->details.bi_bdev = NULL;
+
        return error;
 }
 
index 87edc342ccb3d5c51bb45313f1218f9839528917..84e76ebac4d4eac844850f7b1b37e12d2d0d72f2 100644 (file)
@@ -825,7 +825,7 @@ fail:
        return -EINVAL;
 }
 
-static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
+static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 {
        return  sb1->set_uuid0 == sb2->set_uuid0 &&
                sb1->set_uuid1 == sb2->set_uuid1 &&
@@ -833,7 +833,7 @@ static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
                sb1->set_uuid3 == sb2->set_uuid3;
 }
 
-static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
+static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 {
        int ret;
        mdp_super_t *tmp1, *tmp2;
@@ -1025,12 +1025,12 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
        } else {
                __u64 ev1, ev2;
                mdp_super_t *refsb = page_address(refdev->sb_page);
-               if (!uuid_equal(refsb, sb)) {
+               if (!md_uuid_equal(refsb, sb)) {
                        pr_warn("md: %s has different UUID to %s\n",
                                b, bdevname(refdev->bdev,b2));
                        goto abort;
                }
-               if (!sb_equal(refsb, sb)) {
+               if (!md_sb_equal(refsb, sb)) {
                        pr_warn("md: %s has same UUID but different superblock to %s\n",
                                b, bdevname(refdev->bdev, b2));
                        goto abort;
index 75488e65cd96cf6484e300d47280df77f1eec77a..8d46e3ad9529d46a2b2e27229668e517ef38552f 100644 (file)
@@ -245,8 +245,7 @@ static int arizona_poll_reg(struct arizona *arizona,
        int ret;
 
        ret = regmap_read_poll_timeout(arizona->regmap,
-                                      ARIZONA_INTERRUPT_RAW_STATUS_5, val,
-                                      ((val & mask) == target),
+                                      reg, val, ((val & mask) == target),
                                       ARIZONA_REG_POLL_DELAY_US,
                                       timeout_ms * 1000);
        if (ret)
index 92fc3f7c538d73e9e496aacfeae45f37cd3c1a96..9577beb278e7d51fc1ed08db0f52053abebec1ec 100644 (file)
@@ -404,10 +404,9 @@ struct intel_host {
        bool    d3_retune;
 };
 
-const u8 intel_dsm_uuid[] = {
-       0xA5, 0x3E, 0xC1, 0xF6, 0xCD, 0x65, 0x1F, 0x46,
-       0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61,
-};
+const guid_t intel_dsm_guid =
+       GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
+                 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
 
 static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
                       unsigned int fn, u32 *result)
@@ -416,7 +415,7 @@ static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
        int err = 0;
        size_t len;
 
-       obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), intel_dsm_uuid, 0, fn, NULL);
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
        if (!obj)
                return -EOPNOTSUPP;
 
index ea1bfcf1870afbc5806e61dd6416669216f38ce7..53309f659951042ca6301c95e110910414031e70 100644 (file)
@@ -2171,9 +2171,10 @@ static int cxgb_up(struct adapter *adap)
 {
        int err;
 
+       mutex_lock(&uld_mutex);
        err = setup_sge_queues(adap);
        if (err)
-               goto out;
+               goto rel_lock;
        err = setup_rss(adap);
        if (err)
                goto freeq;
@@ -2197,7 +2198,6 @@ static int cxgb_up(struct adapter *adap)
                        goto irq_err;
        }
 
-       mutex_lock(&uld_mutex);
        enable_rx(adap);
        t4_sge_start(adap);
        t4_intr_enable(adap);
@@ -2210,13 +2210,15 @@ static int cxgb_up(struct adapter *adap)
 #endif
        /* Initialize hash mac addr list*/
        INIT_LIST_HEAD(&adap->mac_hlist);
- out:
        return err;
+
  irq_err:
        dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
  freeq:
        t4_free_sge_resources(adap);
-       goto out;
+ rel_lock:
+       mutex_unlock(&uld_mutex);
+       return err;
 }
 
 static void cxgb_down(struct adapter *adapter)
index 9a520e4f0df9a0d47b75f71f01557414ba3d4eab..290ad0563320d6bd9f59212d0cd70b765336441d 100644 (file)
@@ -2647,7 +2647,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
        priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
 
        /* device used for DMA mapping */
-       arch_setup_dma_ops(dev, 0, 0, NULL, false);
+       set_dma_ops(dev, get_dma_ops(&pdev->dev));
        err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
        if (err) {
                dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
index 0b31f8502adae2e86c292fb99437ee943c0794bf..6e67d22fd0d54f69e5ee3358717e7ed539fad952 100644 (file)
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
                goto no_mem;
        }
 
+       set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
+
        ret = platform_device_add_data(pdev, &data, sizeof(data));
        if (ret)
                goto err;
index e13aa064a8e943da7c9538e88bc425f299e944ca..6b15a507999c42749165482aa5e7d26a4d3dcf1c 100644 (file)
@@ -29,10 +29,9 @@ enum _dsm_rst_type {
        HNS_ROCE_RESET_FUNC     = 0x7,
 };
 
-const u8 hns_dsaf_acpi_dsm_uuid[] = {
-       0x1A, 0xAA, 0x85, 0x1A, 0x93, 0xE2, 0x5E, 0x41,
-       0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A
-};
+const guid_t hns_dsaf_acpi_dsm_guid =
+       GUID_INIT(0x1A85AA1A, 0xE293, 0x415E,
+                 0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A);
 
 static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
 {
@@ -151,7 +150,7 @@ static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
        argv4.package.elements = obj_args;
 
        obj = acpi_evaluate_dsm(ACPI_HANDLE(dsaf_dev->dev),
-                               hns_dsaf_acpi_dsm_uuid, 0, op_type, &argv4);
+                               &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
        if (!obj) {
                dev_warn(dsaf_dev->dev, "reset port_type%d port%d fail!",
                         port_type, port);
@@ -434,7 +433,7 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
        argv4.package.elements = &obj_args,
 
        obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
-                               hns_dsaf_acpi_dsm_uuid, 0,
+                               &hns_dsaf_acpi_dsm_guid, 0,
                                HNS_OP_GET_PORT_TYPE_FUNC, &argv4);
 
        if (!obj || obj->type != ACPI_TYPE_INTEGER)
@@ -474,7 +473,7 @@ int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
        argv4.package.elements = &obj_args,
 
        obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
-                               hns_dsaf_acpi_dsm_uuid, 0,
+                               &hns_dsaf_acpi_dsm_guid, 0,
                                HNS_OP_GET_SFP_STAT_FUNC, &argv4);
 
        if (!obj || obj->type != ACPI_TYPE_INTEGER)
@@ -565,7 +564,7 @@ hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en)
        argv4.package.elements = obj_args;
 
        obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dsaf_dev->dev),
-                               hns_dsaf_acpi_dsm_uuid, 0,
+                               &hns_dsaf_acpi_dsm_guid, 0,
                                HNS_OP_SERDES_LP_FUNC, &argv4);
        if (!obj) {
                dev_warn(mac_cb->dsaf_dev->dev, "set port%d serdes lp fail!",
index b8fab149690f880394f0d973560aecca69d1171e..e95795b3c84160c6dd567c3a725d66f7886fe6fe 100644 (file)
@@ -288,9 +288,15 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
 
                /* Force 1000M Link, Default is 0x0200 */
                phy_write(phy_dev, 7, 0x20C);
-               phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
 
-               /* Enable PHY loop-back */
+               /* Powerup Fiber */
+               phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
+               val = phy_read(phy_dev, COPPER_CONTROL_REG);
+               val &= ~PHY_POWER_DOWN;
+               phy_write(phy_dev, COPPER_CONTROL_REG, val);
+
+               /* Enable Phy Loopback */
+               phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
                val = phy_read(phy_dev, COPPER_CONTROL_REG);
                val |= PHY_LOOP_BACK;
                val &= ~PHY_POWER_DOWN;
@@ -299,6 +305,12 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
                phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
                phy_write(phy_dev, 1, 0x400);
                phy_write(phy_dev, 7, 0x200);
+
+               phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
+               val = phy_read(phy_dev, COPPER_CONTROL_REG);
+               val |= PHY_POWER_DOWN;
+               phy_write(phy_dev, COPPER_CONTROL_REG, val);
+
                phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
                phy_write(phy_dev, 9, 0xF00);
 
index 8209affa75c3e5e0419634740d6985970f61fa67..16486dff14933807d47b2a99f970ffbe3996b3a2 100644 (file)
@@ -1242,11 +1242,11 @@ static int mlx5e_get_ts_info(struct net_device *dev,
                                 SOF_TIMESTAMPING_RX_HARDWARE |
                                 SOF_TIMESTAMPING_RAW_HARDWARE;
 
-       info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) |
-                        (BIT(1) << HWTSTAMP_TX_ON);
+       info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+                        BIT(HWTSTAMP_TX_ON);
 
-       info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) |
-                          (BIT(1) << HWTSTAMP_FILTER_ALL);
+       info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+                          BIT(HWTSTAMP_FILTER_ALL);
 
        return 0;
 }
index 41cd22a223dccbd9dae460331525a1fc2414be9e..277f4de303751d6328a6bcf3282f28e6d5565e5d 100644 (file)
@@ -4241,7 +4241,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
        return netdev;
 
 err_cleanup_nic:
-       profile->cleanup(priv);
+       if (profile->cleanup)
+               profile->cleanup(priv);
        free_netdev(netdev);
 
        return NULL;
index 79462c0368a0781ca7a38354726ed628097c0c89..46984a52a94bb7fad172704b177bdf1c81ef27b8 100644 (file)
@@ -791,6 +791,8 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
        params->tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
        params->num_tc                = 1;
        params->lro_wqe_sz            = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+       mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 }
 
 static void mlx5e_build_rep_netdev(struct net_device *netdev)
index ec63158ab64330c939ffa4ca8c001f8134f8e4e2..9df9fc0d26f5b89457b1406198fa5c51d50536b3 100644 (file)
@@ -895,7 +895,6 @@ static struct mlx5_fields fields[] = {
        {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
        {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},
 
-       {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
        {MLX5_ACTION_IN_FIELD_OUT_IP_TTL,  1, offsetof(struct pedit_headers, ip4.ttl)},
        {MLX5_ACTION_IN_FIELD_OUT_SIPV4,   4, offsetof(struct pedit_headers, ip4.saddr)},
        {MLX5_ACTION_IN_FIELD_OUT_DIPV4,   4, offsetof(struct pedit_headers, ip4.daddr)},
index f991f669047e5df2531f043c3934d248bf3099b8..a53e982a68634b5129324fb5af58776804fc5853 100644 (file)
@@ -906,21 +906,34 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
        return 0;
 }
 
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+static int mlx5_devlink_eswitch_check(struct devlink *devlink)
 {
-       struct mlx5_core_dev *dev;
-       u16 cur_mlx5_mode, mlx5_mode = 0;
+       struct mlx5_core_dev *dev = devlink_priv(devlink);
 
-       dev = devlink_priv(devlink);
+       if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+               return -EOPNOTSUPP;
 
        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;
 
-       cur_mlx5_mode = dev->priv.eswitch->mode;
-
-       if (cur_mlx5_mode == SRIOV_NONE)
+       if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
+       return 0;
+}
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+{
+       struct mlx5_core_dev *dev = devlink_priv(devlink);
+       u16 cur_mlx5_mode, mlx5_mode = 0;
+       int err;
+
+       err = mlx5_devlink_eswitch_check(devlink);
+       if (err)
+               return err;
+
+       cur_mlx5_mode = dev->priv.eswitch->mode;
+
        if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;
 
@@ -937,15 +950,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
 
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 {
-       struct mlx5_core_dev *dev;
-
-       dev = devlink_priv(devlink);
-
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
+       struct mlx5_core_dev *dev = devlink_priv(devlink);
+       int err;
 
-       if (dev->priv.eswitch->mode == SRIOV_NONE)
-               return -EOPNOTSUPP;
+       err = mlx5_devlink_eswitch_check(devlink);
+       if (err)
+               return err;
 
        return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }
@@ -954,15 +964,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
-       int num_vports = esw->enabled_vports;
        int err, vport;
        u8 mlx5_mode;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
-
-       if (esw->mode == SRIOV_NONE)
-               return -EOPNOTSUPP;
+       err = mlx5_devlink_eswitch_check(devlink);
+       if (err)
+               return err;
 
        switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
        case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
@@ -985,7 +992,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
        if (err)
                goto out;
 
-       for (vport = 1; vport < num_vports; vport++) {
+       for (vport = 1; vport < esw->enabled_vports; vport++) {
                err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
                if (err) {
                        esw_warn(dev, "Failed to set min inline on vport %d\n",
@@ -1010,12 +1017,11 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
+       int err;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
-
-       if (esw->mode == SRIOV_NONE)
-               return -EOPNOTSUPP;
+       err = mlx5_devlink_eswitch_check(devlink);
+       if (err)
+               return err;
 
        return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }
@@ -1062,11 +1068,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
-
-       if (esw->mode == SRIOV_NONE)
-               return -EOPNOTSUPP;
+       err = mlx5_devlink_eswitch_check(devlink);
+       if (err)
+               return err;
 
        if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
            (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
@@ -1105,12 +1109,11 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
 {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
+       int err;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
-
-       if (esw->mode == SRIOV_NONE)
-               return -EOPNOTSUPP;
+       err = mlx5_devlink_eswitch_check(devlink);
+       if (err)
+               return err;
 
        *encap = esw->offloads.encap;
        return 0;
index 4f577a5abf884645910203aace25ad9605e171d8..13be264587f135737887688005ed7ba872b54157 100644 (file)
@@ -175,8 +175,9 @@ static struct mlx5_profile profile[] = {
        },
 };
 
-#define FW_INIT_TIMEOUT_MILI   2000
-#define FW_INIT_WAIT_MS                2
+#define FW_INIT_TIMEOUT_MILI           2000
+#define FW_INIT_WAIT_MS                        2
+#define FW_PRE_INIT_TIMEOUT_MILI       10000
 
 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
 {
@@ -1013,6 +1014,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
         */
        dev->state = MLX5_DEVICE_STATE_UP;
 
+       /* wait for firmware to accept initialization segments configurations
+        */
+       err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
+       if (err) {
+               dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+                       FW_PRE_INIT_TIMEOUT_MILI);
+               goto out;
+       }
+
        err = mlx5_cmd_init(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
index b7e4345c990d55454f52cbcb9c7c4d5df66ef176..019cef1d3cf72ce2d34b3a36283cabf9227c9879 100644 (file)
@@ -661,8 +661,6 @@ restore_filters:
                up_write(&vf->efx->filter_sem);
                mutex_unlock(&vf->efx->mac_lock);
 
-               up_write(&vf->efx->filter_sem);
-
                rc2 = efx_net_open(vf->efx->net_dev);
                if (rc2)
                        goto reset_nic;
index d16d11bfc046467c41edab070bd3015bd932d338..6e4cbc6ce0efd9843a55341ec47eb76fd9932327 100644 (file)
@@ -2831,7 +2831,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
        tx_q->tx_skbuff_dma[first_entry].buf = des;
        tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
-       tx_q->tx_skbuff[first_entry] = skb;
 
        first->des0 = cpu_to_le32(des);
 
@@ -2865,6 +2864,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
        tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
 
+       /* Only the last descriptor gets to point to the skb. */
+       tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+
+       /* We've used all descriptors we need for this skb, however,
+        * advance cur_tx so that it references a fresh descriptor.
+        * ndo_start_xmit will fill this descriptor the next time it's
+        * called and stmmac_tx_clean may clean up to this descriptor.
+        */
        tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
 
        if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
@@ -2998,8 +3005,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        first = desc;
 
-       tx_q->tx_skbuff[first_entry] = skb;
-
        enh_desc = priv->plat->enh_desc;
        /* To program the descriptors according to the size of the frame */
        if (enh_desc)
@@ -3047,8 +3052,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                                                skb->len);
        }
 
-       entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+       /* Only the last descriptor gets to point to the skb. */
+       tx_q->tx_skbuff[entry] = skb;
 
+       /* We've used all descriptors we need for this skb, however,
+        * advance cur_tx so that it references a fresh descriptor.
+        * ndo_start_xmit will fill this descriptor the next time it's
+        * called and stmmac_tx_clean may clean up to this descriptor.
+        */
+       entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
        tx_q->cur_tx = entry;
 
        if (netif_msg_pktdata(priv)) {
index c7c1e9906500fd5be3d386ea4c43848caaff6ac1..d231042f19d6462018bc10fee0c789196c51abf9 100644 (file)
@@ -442,7 +442,7 @@ struct brcmf_fw {
        const char *nvram_name;
        u16 domain_nr;
        u16 bus_nr;
-       void (*done)(struct device *dev, const struct firmware *fw,
+       void (*done)(struct device *dev, int err, const struct firmware *fw,
                     void *nvram_image, u32 nvram_len);
 };
 
@@ -477,52 +477,51 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
        if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
                goto fail;
 
-       fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
+       fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length);
        kfree(fwctx);
        return;
 
 fail:
        brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
        release_firmware(fwctx->code);
-       device_release_driver(fwctx->dev);
+       fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0);
        kfree(fwctx);
 }
 
 static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
 {
        struct brcmf_fw *fwctx = ctx;
-       int ret;
+       int ret = 0;
 
        brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
-       if (!fw)
+       if (!fw) {
+               ret = -ENOENT;
                goto fail;
-
-       /* only requested code so done here */
-       if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
-               fwctx->done(fwctx->dev, fw, NULL, 0);
-               kfree(fwctx);
-               return;
        }
+       /* only requested code so done here */
+       if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM))
+               goto done;
+
        fwctx->code = fw;
        ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
                                      fwctx->dev, GFP_KERNEL, fwctx,
                                      brcmf_fw_request_nvram_done);
 
-       if (!ret)
-               return;
-
-       brcmf_fw_request_nvram_done(NULL, fwctx);
+       /* pass NULL to nvram callback for bcm47xx fallback */
+       if (ret)
+               brcmf_fw_request_nvram_done(NULL, fwctx);
        return;
 
 fail:
        brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
-       device_release_driver(fwctx->dev);
+done:
+       fwctx->done(fwctx->dev, ret, fw, NULL, 0);
        kfree(fwctx);
 }
 
 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
                                const char *code, const char *nvram,
-                               void (*fw_cb)(struct device *dev,
+                               void (*fw_cb)(struct device *dev, int err,
                                              const struct firmware *fw,
                                              void *nvram_image, u32 nvram_len),
                                u16 domain_nr, u16 bus_nr)
@@ -555,7 +554,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
 
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
                           const char *code, const char *nvram,
-                          void (*fw_cb)(struct device *dev,
+                          void (*fw_cb)(struct device *dev, int err,
                                         const struct firmware *fw,
                                         void *nvram_image, u32 nvram_len))
 {
index d3c9f0d52ae3326eb18a76aaf735bb602a211a35..8fa4b7e1ab3db71c2522aa9a275ac9e6393eb0a7 100644 (file)
@@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram);
  */
 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
                                const char *code, const char *nvram,
-                               void (*fw_cb)(struct device *dev,
+                               void (*fw_cb)(struct device *dev, int err,
                                              const struct firmware *fw,
                                              void *nvram_image, u32 nvram_len),
                                u16 domain_nr, u16 bus_nr);
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
                           const char *code, const char *nvram,
-                          void (*fw_cb)(struct device *dev,
+                          void (*fw_cb)(struct device *dev, int err,
                                         const struct firmware *fw,
                                         void *nvram_image, u32 nvram_len));
 
index 72373e59308e8fe54cf14090efd135a045273f4a..f59642b2c935a503f36600f4573861a7597ad267 100644 (file)
@@ -2145,7 +2145,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
        struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
        struct brcmf_fws_mac_descriptor *entry;
 
-       if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE)
+       if (!ifp->ndev || !brcmf_fws_queue_skbs(fws))
                return;
 
        entry = &fws->desc.iface[ifp->ifidx];
index f36b96dc6acdfc2160ba35e187a46356a3b41c28..f878706613e679515410e573eb770f1b1a28ef26 100644 (file)
@@ -1650,16 +1650,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
        .write32 = brcmf_pcie_buscore_write32,
 };
 
-static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
+static void brcmf_pcie_setup(struct device *dev, int ret,
+                            const struct firmware *fw,
                             void *nvram, u32 nvram_len)
 {
-       struct brcmf_bus *bus = dev_get_drvdata(dev);
-       struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
-       struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+       struct brcmf_bus *bus;
+       struct brcmf_pciedev *pcie_bus_dev;
+       struct brcmf_pciedev_info *devinfo;
        struct brcmf_commonring **flowrings;
-       int ret;
        u32 i;
 
+       /* check firmware loading result */
+       if (ret)
+               goto fail;
+
+       bus = dev_get_drvdata(dev);
+       pcie_bus_dev = bus->bus_priv.pcie;
+       devinfo = pcie_bus_dev->devinfo;
        brcmf_pcie_attach(devinfo);
 
        /* Some of the firmwares have the size of the memory of the device
index e03450059b06c0bfe510148f985c19668bcd3dff..5653d6dd38f6fe5c5132f2d7940facd31bef6549 100644 (file)
@@ -3982,21 +3982,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
        .get_memdump = brcmf_sdio_bus_get_memdump,
 };
 
-static void brcmf_sdio_firmware_callback(struct device *dev,
+static void brcmf_sdio_firmware_callback(struct device *dev, int err,
                                         const struct firmware *code,
                                         void *nvram, u32 nvram_len)
 {
-       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
-       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
-       struct brcmf_sdio *bus = sdiodev->bus;
-       int err = 0;
+       struct brcmf_bus *bus_if;
+       struct brcmf_sdio_dev *sdiodev;
+       struct brcmf_sdio *bus;
        u8 saveclk;
 
-       brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+       brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
+       bus_if = dev_get_drvdata(dev);
+       sdiodev = bus_if->bus_priv.sdio;
+       if (err)
+               goto fail;
 
        if (!bus_if->drvr)
                return;
 
+       bus = sdiodev->bus;
+
        /* try to download image and nvram to the dongle */
        bus->alp_only = true;
        err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4083,6 +4088,7 @@ release:
 fail:
        brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
        device_release_driver(dev);
+       device_release_driver(&sdiodev->func[2]->dev);
 }
 
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
index e4d545f9edeef6f119a0b67f50db679bedfcd91f..0eea48e73331d57297099266b1725df2be35a565 100644 (file)
@@ -1159,17 +1159,18 @@ fail:
        return ret;
 }
 
-static void brcmf_usb_probe_phase2(struct device *dev,
+static void brcmf_usb_probe_phase2(struct device *dev, int ret,
                                   const struct firmware *fw,
                                   void *nvram, u32 nvlen)
 {
        struct brcmf_bus *bus = dev_get_drvdata(dev);
-       struct brcmf_usbdev_info *devinfo;
-       int ret;
+       struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo;
+
+       if (ret)
+               goto error;
 
        brcmf_dbg(USB, "Start fw downloading\n");
 
-       devinfo = bus->bus_priv.usb->devinfo;
        ret = check_file(fw->data);
        if (ret < 0) {
                brcmf_err("invalid firmware\n");
index c00238491673766e05bc5bd2d2d3bec4aac30484..7b3b6fd63d7d7caa21bbc9c517c4e28910b0548b 100644 (file)
@@ -2878,7 +2878,7 @@ static const struct intel_ntb_reg skx_reg = {
        .link_is_up             = xeon_link_is_up,
        .db_ioread              = skx_db_ioread,
        .db_iowrite             = skx_db_iowrite,
-       .db_size                = sizeof(u64),
+       .db_size                = sizeof(u32),
        .ntb_ctl                = SKX_NTBCNTL_OFFSET,
        .mw_bar                 = {2, 4},
 };
index 02ca45fdd89203f31246f552811b1264059232d6..10e5bf4601398c8723d82b5f3340f9b246d341e5 100644 (file)
@@ -177,14 +177,12 @@ struct ntb_transport_qp {
        u64 rx_err_ver;
        u64 rx_memcpy;
        u64 rx_async;
-       u64 dma_rx_prep_err;
        u64 tx_bytes;
        u64 tx_pkts;
        u64 tx_ring_full;
        u64 tx_err_no_buf;
        u64 tx_memcpy;
        u64 tx_async;
-       u64 dma_tx_prep_err;
 };
 
 struct ntb_transport_mw {
@@ -254,8 +252,6 @@ enum {
 #define QP_TO_MW(nt, qp)       ((qp) % nt->mw_count)
 #define NTB_QP_DEF_NUM_ENTRIES 100
 #define NTB_LINK_DOWN_TIMEOUT  10
-#define DMA_RETRIES            20
-#define DMA_OUT_RESOURCE_TO    msecs_to_jiffies(50)
 
 static void ntb_transport_rxc_db(unsigned long data);
 static const struct ntb_ctx_ops ntb_transport_ops;
@@ -516,12 +512,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "free tx - \t%u\n",
                               ntb_transport_tx_free_entry(qp));
-       out_offset += snprintf(buf + out_offset, out_count - out_offset,
-                              "DMA tx prep err - \t%llu\n",
-                              qp->dma_tx_prep_err);
-       out_offset += snprintf(buf + out_offset, out_count - out_offset,
-                              "DMA rx prep err - \t%llu\n",
-                              qp->dma_rx_prep_err);
 
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "\n");
@@ -623,7 +613,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
        if (!mw->virt_addr)
                return -ENOMEM;
 
-       if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+       if (mw_num < qp_count % mw_count)
                num_qps_mw = qp_count / mw_count + 1;
        else
                num_qps_mw = qp_count / mw_count;
@@ -768,8 +758,6 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
        qp->tx_err_no_buf = 0;
        qp->tx_memcpy = 0;
        qp->tx_async = 0;
-       qp->dma_tx_prep_err = 0;
-       qp->dma_rx_prep_err = 0;
 }
 
 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
@@ -1000,7 +988,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
        qp->event_handler = NULL;
        ntb_qp_link_down_reset(qp);
 
-       if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+       if (mw_num < qp_count % mw_count)
                num_qps_mw = qp_count / mw_count + 1;
        else
                num_qps_mw = qp_count / mw_count;
@@ -1128,8 +1116,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
        qp_count = ilog2(qp_bitmap);
        if (max_num_clients && max_num_clients < qp_count)
                qp_count = max_num_clients;
-       else if (mw_count < qp_count)
-               qp_count = mw_count;
+       else if (nt->mw_count < qp_count)
+               qp_count = nt->mw_count;
 
        qp_bitmap &= BIT_ULL(qp_count) - 1;
 
@@ -1317,7 +1305,6 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
        struct dmaengine_unmap_data *unmap;
        dma_cookie_t cookie;
        void *buf = entry->buf;
-       int retries = 0;
 
        len = entry->len;
        device = chan->device;
@@ -1346,22 +1333,11 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
 
        unmap->from_cnt = 1;
 
-       for (retries = 0; retries < DMA_RETRIES; retries++) {
-               txd = device->device_prep_dma_memcpy(chan,
-                                                    unmap->addr[1],
-                                                    unmap->addr[0], len,
-                                                    DMA_PREP_INTERRUPT);
-               if (txd)
-                       break;
-
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(DMA_OUT_RESOURCE_TO);
-       }
-
-       if (!txd) {
-               qp->dma_rx_prep_err++;
+       txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+                                            unmap->addr[0], len,
+                                            DMA_PREP_INTERRUPT);
+       if (!txd)
                goto err_get_unmap;
-       }
 
        txd->callback_result = ntb_rx_copy_callback;
        txd->callback_param = entry;
@@ -1606,7 +1582,6 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
        struct dmaengine_unmap_data *unmap;
        dma_addr_t dest;
        dma_cookie_t cookie;
-       int retries = 0;
 
        device = chan->device;
        dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
@@ -1628,21 +1603,10 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
 
        unmap->to_cnt = 1;
 
-       for (retries = 0; retries < DMA_RETRIES; retries++) {
-               txd = device->device_prep_dma_memcpy(chan, dest,
-                                                    unmap->addr[0], len,
-                                                    DMA_PREP_INTERRUPT);
-               if (txd)
-                       break;
-
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(DMA_OUT_RESOURCE_TO);
-       }
-
-       if (!txd) {
-               qp->dma_tx_prep_err++;
+       txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+                                            DMA_PREP_INTERRUPT);
+       if (!txd)
                goto err_get_unmap;
-       }
 
        txd->callback_result = ntb_tx_copy_callback;
        txd->callback_param = entry;
index 434e1d474f3340e1d35b48c924a6bebfbfb0fa67..5cab2831ce99ae39dac8fe8a1c8b2bd216a9e901 100644 (file)
@@ -90,11 +90,11 @@ MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
 
 static unsigned int seg_order = 19; /* 512K */
 module_param(seg_order, uint, 0644);
-MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing");
+MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing");
 
 static unsigned int run_order = 32; /* 4G */
 module_param(run_order, uint, 0644);
-MODULE_PARM_DESC(run_order, "size order [n^2] of total data to transfer");
+MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer");
 
 static bool use_dma; /* default to 0 */
 module_param(use_dma, bool, 0644);
index ae00dc0d97917392cd3f447d1e3bb937a1a67253..4c989bb9a8a03fea2f76f1c0ad49cbfb842d7fc3 100644 (file)
@@ -222,13 +222,6 @@ struct device *nd_btt_create(struct nd_region *nd_region)
        return dev;
 }
 
-static bool uuid_is_null(u8 *uuid)
-{
-       static const u8 null_uuid[16];
-
-       return (memcmp(uuid, null_uuid, 16) == 0);
-}
-
 /**
  * nd_btt_arena_is_valid - check if the metadata layout is valid
  * @nd_btt:    device with BTT geometry and backing device info
@@ -249,7 +242,7 @@ bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super)
        if (memcmp(super->signature, BTT_SIG, BTT_SIG_LEN) != 0)
                return false;
 
-       if (!uuid_is_null(super->parent_uuid))
+       if (!guid_is_null((guid_t *)&super->parent_uuid))
                if (memcmp(super->parent_uuid, parent_uuid, 16) != 0)
                        return false;
 
index 990e6fb32a636201078da585372d6ddfed97c9bd..c190d7e36900175f41110e97b8c327f93fc47fdd 100644 (file)
@@ -58,7 +58,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
        kref_init(&host->ref);
        memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
-       uuid_be_gen(&host->id);
+       uuid_gen(&host->id);
 
        list_add_tail(&host->list, &nvmf_hosts);
 out_unlock:
@@ -75,7 +75,7 @@ static struct nvmf_host *nvmf_host_default(void)
                return NULL;
 
        kref_init(&host->ref);
-       uuid_be_gen(&host->id);
+       uuid_gen(&host->id);
        snprintf(host->nqn, NVMF_NQN_SIZE,
                "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
 
@@ -395,7 +395,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
        if (!data)
                return -ENOMEM;
 
-       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
+       uuid_copy(&data->hostid, &ctrl->opts->host->id);
        data->cntlid = cpu_to_le16(0xffff);
        strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
        strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -454,7 +454,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
        if (!data)
                return -ENOMEM;
 
-       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
+       uuid_copy(&data->hostid, &ctrl->opts->host->id);
        data->cntlid = cpu_to_le16(ctrl->cntlid);
        strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
        strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
index f5a9c1fb186f2d5278ecd72eeaf93b1b7d75e6e8..29be7600689d41ed5549d5fc070c070cb1d8f7ae 100644 (file)
@@ -36,7 +36,7 @@ struct nvmf_host {
        struct kref             ref;
        struct list_head        list;
        char                    nqn[NVMF_NQN_SIZE];
-       uuid_be                 id;
+       uuid_t                  id;
 };
 
 /**
index 92964cef0f4be5795bed3e874407c74a3e3cc725..5ee4c71d168d182b4b48aff8c71827ae3b38e472 100644 (file)
@@ -878,8 +878,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
        assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
        /* Linux supports only Dynamic controllers */
        assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
-       memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
-               min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
+       uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
        strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
                min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
        strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
index cfc5c7fb0ab78411f8d6e96ab7ffaaad241b6244..8ff6e430b30afe14730355e2ffd029a482aef578 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/percpu-refcount.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
+#include <linux/uuid.h>
 #include <linux/nvme.h>
 #include <linux/configfs.h>
 #include <linux/rcupdate.h>
index 001860361434623fcf84de1c2533df03835de8a0..47070cff508c4496e51060e1167d33d426ad1e93 100644 (file)
 #include "pci.h"
 
 /*
- * The UUID is defined in the PCI Firmware Specification available here:
+ * The GUID is defined in the PCI Firmware Specification available here:
  * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
  */
-const u8 pci_acpi_dsm_uuid[] = {
-       0xd0, 0x37, 0xc9, 0xe5, 0x53, 0x35, 0x7a, 0x4d,
-       0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d
-};
+const guid_t pci_acpi_dsm_guid =
+       GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
+                 0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);
 
 #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
 static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
@@ -680,7 +679,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
        if (!pci_is_root_bus(bus))
                return;
 
-       obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), pci_acpi_dsm_uuid, 3,
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
                                RESET_DELAY_DSM, NULL);
        if (!obj)
                return;
@@ -745,7 +744,7 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev,
        if (bridge->ignore_reset_delay)
                pdev->d3cold_delay = 0;
 
-       obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 3,
+       obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
                                FUNCTION_DELAY_DSM, NULL);
        if (!obj)
                return;
index 51357377efbce65d4ad8454ffd678fc4df8de7a2..2d8db3ead6e82f91106db1ae883d354e2fdb5c71 100644 (file)
@@ -172,7 +172,7 @@ static int dsm_get_label(struct device *dev, char *buf,
        if (!handle)
                return -1;
 
-       obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 0x2,
+       obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 0x2,
                                DEVICE_LABEL_DSM, NULL);
        if (!obj)
                return -1;
@@ -212,7 +212,7 @@ static bool device_has_dsm(struct device *dev)
        if (!handle)
                return false;
 
-       return !!acpi_check_dsm(handle, pci_acpi_dsm_uuid, 0x2,
+       return !!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2,
                                1 << DEVICE_LABEL_DSM);
 }
 
index 1482d132fbb879ab39ec62dadcd48e9445feced9..e432ec887479d32b2be5afcc1a26e295909b04cb 100644 (file)
@@ -495,64 +495,54 @@ static struct irq_chip amd_gpio_irqchip = {
        .flags        = IRQCHIP_SKIP_SET_WAKE,
 };
 
-static void amd_gpio_irq_handler(struct irq_desc *desc)
+#define PIN_IRQ_PENDING        (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
+
+static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
 {
-       u32 i;
-       u32 off;
-       u32 reg;
-       u32 pin_reg;
-       u64 reg64;
-       int handled = 0;
-       unsigned int irq;
+       struct amd_gpio *gpio_dev = dev_id;
+       struct gpio_chip *gc = &gpio_dev->gc;
+       irqreturn_t ret = IRQ_NONE;
+       unsigned int i, irqnr;
        unsigned long flags;
-       struct irq_chip *chip = irq_desc_get_chip(desc);
-       struct gpio_chip *gc = irq_desc_get_handler_data(desc);
-       struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+       u32 *regs, regval;
+       u64 status, mask;
 
-       chained_irq_enter(chip, desc);
-       /*enable GPIO interrupt again*/
+       /* Read the wake status */
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
-       reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
-       reg64 = reg;
-       reg64 = reg64 << 32;
-
-       reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
-       reg64 |= reg;
+       status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
+       status <<= 32;
+       status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
-       /*
-        * first 46 bits indicates interrupt status.
-        * one bit represents four interrupt sources.
-       */
-       for (off = 0; off < 46 ; off++) {
-               if (reg64 & BIT(off)) {
-                       for (i = 0; i < 4; i++) {
-                               pin_reg = readl(gpio_dev->base +
-                                               (off * 4 + i) * 4);
-                               if ((pin_reg & BIT(INTERRUPT_STS_OFF)) ||
-                                       (pin_reg & BIT(WAKE_STS_OFF))) {
-                                       irq = irq_find_mapping(gc->irqdomain,
-                                                               off * 4 + i);
-                                       generic_handle_irq(irq);
-                                       writel(pin_reg,
-                                               gpio_dev->base
-                                               + (off * 4 + i) * 4);
-                                       handled++;
-                               }
-                       }
+       /* Bit 0-45 contain the relevant status bits */
+       status &= (1ULL << 46) - 1;
+       regs = gpio_dev->base;
+       for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) {
+               if (!(status & mask))
+                       continue;
+               status &= ~mask;
+
+               /* Each status bit covers four pins */
+               for (i = 0; i < 4; i++) {
+                       regval = readl(regs + i);
+                       if (!(regval & PIN_IRQ_PENDING))
+                               continue;
+                       irq = irq_find_mapping(gc->irqdomain, irqnr + i);
+                       generic_handle_irq(irq);
+                       /* Clear interrupt */
+                       writel(regval, regs + i);
+                       ret = IRQ_HANDLED;
                }
        }
 
-       if (handled == 0)
-               handle_bad_irq(desc);
-
+       /* Signal EOI to the GPIO unit */
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
-       reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
-       reg |= EOI_MASK;
-       writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG);
+       regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
+       regval |= EOI_MASK;
+       writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG);
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
-       chained_irq_exit(chip, desc);
+       return ret;
 }
 
 static int amd_get_groups_count(struct pinctrl_dev *pctldev)
@@ -821,10 +811,11 @@ static int amd_gpio_probe(struct platform_device *pdev)
                goto out2;
        }
 
-       gpiochip_set_chained_irqchip(&gpio_dev->gc,
-                                &amd_gpio_irqchip,
-                                irq_base,
-                                amd_gpio_irq_handler);
+       ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0,
+                              KBUILD_MODNAME, gpio_dev);
+       if (ret)
+               goto out2;
+
        platform_set_drvdata(pdev, gpio_dev);
 
        dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
index d3c5f5dfbbd7974258e4105e3da7114ba7b2324a..222b6685b09f2d72f144daadbf3a3b67a06c1109 100644 (file)
@@ -798,7 +798,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
                break;
        case PIN_CONFIG_OUTPUT:
                __stm32_gpio_set(bank, offset, arg);
-               ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false);
+               ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
                break;
        default:
                ret = -EINVAL;
index 8bc7ee1a8ca81626829329e80831ffa2d57b8c63..507512cc478b1dd2632e033ac7d9a2dc99b8ab5d 100644 (file)
@@ -870,7 +870,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
                QEDI_ERR(&qedi->dbg_ctx,
                         "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
                         protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
-               WARN_ON(1);
        }
 }
 
index 09a294634bc7e8898a2d209a9a5cef3d50eb8f32..879d3b7462f94f38bba4618aecf51dde947aa458 100644 (file)
@@ -1499,11 +1499,9 @@ err_idx:
 
 void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
 {
-       if (!test_and_clear_bit(idx, qedi->task_idx_map)) {
+       if (!test_and_clear_bit(idx, qedi->task_idx_map))
                QEDI_ERR(&qedi->dbg_ctx,
                         "FW task context, already cleared, tid=0x%x\n", idx);
-               WARN_ON(1);
-       }
 }
 
 void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
index dc095a292c61b4ad64f63fcd9685d9e047acf7b6..3be980d472681a4410d681e99d649b99cddb3cea 100644 (file)
@@ -245,7 +245,7 @@ struct sdebug_dev_info {
        unsigned int channel;
        unsigned int target;
        u64 lun;
-       uuid_be lu_name;
+       uuid_t lu_name;
        struct sdebug_host_info *sdbg_host;
        unsigned long uas_bm[1];
        atomic_t num_in_q;
@@ -965,7 +965,7 @@ static const u64 naa3_comp_c = 0x3111111000000000ULL;
 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
                          int target_dev_id, int dev_id_num,
                          const char *dev_id_str, int dev_id_str_len,
-                         const uuid_be *lu_name)
+                         const uuid_t *lu_name)
 {
        int num, port_a;
        char b[32];
@@ -3568,7 +3568,7 @@ static void sdebug_q_cmd_wq_complete(struct work_struct *work)
 }
 
 static bool got_shared_uuid;
-static uuid_be shared_uuid;
+static uuid_t shared_uuid;
 
 static struct sdebug_dev_info *sdebug_device_create(
                        struct sdebug_host_info *sdbg_host, gfp_t flags)
@@ -3578,12 +3578,12 @@ static struct sdebug_dev_info *sdebug_device_create(
        devip = kzalloc(sizeof(*devip), flags);
        if (devip) {
                if (sdebug_uuid_ctl == 1)
-                       uuid_be_gen(&devip->lu_name);
+                       uuid_gen(&devip->lu_name);
                else if (sdebug_uuid_ctl == 2) {
                        if (got_shared_uuid)
                                devip->lu_name = shared_uuid;
                        else {
-                               uuid_be_gen(&shared_uuid);
+                               uuid_gen(&shared_uuid);
                                got_shared_uuid = true;
                                devip->lu_name = shared_uuid;
                        }
index 0d8f81591bed076fa1f89f7cd27360776488f349..3fdca2cdd8da954b5a9c9d906c8b2d2b5e14f040 100644 (file)
@@ -1279,6 +1279,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
         */
        if (dump_payload)
                goto after_immediate_data;
+       /*
+        * Check for underflow case where both EDTL and immediate data payload
+        * exceeds what is presented by CDB's TRANSFER LENGTH, and what has
+        * already been set in target_cmd_size_check() as se_cmd->data_length.
+        *
+        * For this special case, fail the command and dump the immediate data
+        * payload.
+        */
+       if (cmd->first_burst_len > cmd->se_cmd.data_length) {
+               cmd->sense_reason = TCM_INVALID_CDB_FIELD;
+               goto after_immediate_data;
+       }
 
        immed_ret = iscsit_handle_immediate_data(cmd, hdr,
                                        cmd->first_burst_len);
@@ -4423,8 +4435,11 @@ static void iscsit_logout_post_handler_closesession(
         * always sleep waiting for RX/TX thread shutdown to complete
         * within iscsit_close_connection().
         */
-       if (!conn->conn_transport->rdma_shutdown)
+       if (!conn->conn_transport->rdma_shutdown) {
                sleep = cmpxchg(&conn->tx_thread_active, true, false);
+               if (!sleep)
+                       return;
+       }
 
        atomic_set(&conn->conn_logout_remove, 0);
        complete(&conn->conn_logout_comp);
@@ -4440,8 +4455,11 @@ static void iscsit_logout_post_handler_samecid(
 {
        int sleep = 1;
 
-       if (!conn->conn_transport->rdma_shutdown)
+       if (!conn->conn_transport->rdma_shutdown) {
                sleep = cmpxchg(&conn->tx_thread_active, true, false);
+               if (!sleep)
+                       return;
+       }
 
        atomic_set(&conn->conn_logout_remove, 0);
        complete(&conn->conn_logout_comp);
index 9ab7090f7c839c6900cb30ddf7db1b8be4bc78cf..0912de7c0cf8f3ade048de694b4e75f77fe27acd 100644 (file)
@@ -136,7 +136,7 @@ int init_se_kmem_caches(void);
 void   release_se_kmem_caches(void);
 u32    scsi_get_new_index(scsi_index_t);
 void   transport_subsystem_check_init(void);
-void   transport_cmd_finish_abort(struct se_cmd *, int);
+int    transport_cmd_finish_abort(struct se_cmd *, int);
 unsigned char *transport_dump_cmd_direction(struct se_cmd *);
 void   transport_dump_dev_state(struct se_device *, char *, int *);
 void   transport_dump_dev_info(struct se_device *, struct se_lun *,
index dce1e1b47316173329292f90276843d26d32407b..13f47bf4d16b1d790ab470b92127254835e76078 100644 (file)
@@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
        kfree(tmr);
 }
 
-static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
 {
        unsigned long flags;
        bool remove = true, send_tas;
@@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
                transport_send_task_abort(cmd);
        }
 
-       transport_cmd_finish_abort(cmd, remove);
+       return transport_cmd_finish_abort(cmd, remove);
 }
 
 static int target_check_cdb_and_preempt(struct list_head *list,
@@ -184,8 +184,8 @@ void core_tmr_abort_task(
                cancel_work_sync(&se_cmd->work);
                transport_wait_for_tasks(se_cmd);
 
-               transport_cmd_finish_abort(se_cmd, true);
-               target_put_sess_cmd(se_cmd);
+               if (!transport_cmd_finish_abort(se_cmd, true))
+                       target_put_sess_cmd(se_cmd);
 
                printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
                                " ref_tag: %llu\n", ref_tag);
@@ -281,8 +281,8 @@ static void core_tmr_drain_tmr_list(
                cancel_work_sync(&cmd->work);
                transport_wait_for_tasks(cmd);
 
-               transport_cmd_finish_abort(cmd, 1);
-               target_put_sess_cmd(cmd);
+               if (!transport_cmd_finish_abort(cmd, 1))
+                       target_put_sess_cmd(cmd);
        }
 }
 
@@ -380,8 +380,8 @@ static void core_tmr_drain_state_list(
                cancel_work_sync(&cmd->work);
                transport_wait_for_tasks(cmd);
 
-               core_tmr_handle_tas_abort(cmd, tas);
-               target_put_sess_cmd(cmd);
+               if (!core_tmr_handle_tas_abort(cmd, tas))
+                       target_put_sess_cmd(cmd);
        }
 }
 
index 6025935036c976edeeee0d7a91df79a66aa84a2b..f1b3a46bdcaffaf8a301569ef7e328ad2c47087f 100644 (file)
@@ -651,9 +651,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
                percpu_ref_put(&lun->lun_ref);
 }
 
-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
        bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+       int ret = 0;
 
        if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
                transport_lun_remove_cmd(cmd);
@@ -665,9 +666,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
                cmd->se_tfo->aborted_task(cmd);
 
        if (transport_cmd_check_stop_to_fabric(cmd))
-               return;
+               return 1;
        if (remove && ack_kref)
-               transport_put_cmd(cmd);
+               ret = transport_put_cmd(cmd);
+
+       return ret;
 }
 
 static void target_complete_failure_work(struct work_struct *work)
index 9413c4abf0b93cca1681e2e3a46d083219cf5a5c..a9ec94ed7a425a303280c3cdba589d71b642011c 100644 (file)
@@ -23,7 +23,7 @@ enum int3400_thermal_uuid {
        INT3400_THERMAL_MAXIMUM_UUID,
 };
 
-static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
+static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
        "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
        "3A95C389-E4B8-4629-A526-C52C88626BAE",
        "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
@@ -141,10 +141,10 @@ static int int3400_thermal_get_uuids(struct int3400_thermal_priv *priv)
                }
 
                for (j = 0; j < INT3400_THERMAL_MAXIMUM_UUID; j++) {
-                       u8 uuid[16];
+                       guid_t guid;
 
-                       acpi_str_to_uuid(int3400_thermal_uuids[j], uuid);
-                       if (!strncmp(uuid, objb->buffer.pointer, 16)) {
+                       guid_parse(int3400_thermal_uuids[j], &guid);
+                       if (guid_equal((guid_t *)objb->buffer.pointer, &guid)) {
                                priv->uuid_bitmap |= (1 << j);
                                break;
                        }
index 84a2cebfc712023182dbb4622cbc355b3545310d..fe851544d7fbaad82d3fdadc397da734d0a5bf4b 100644 (file)
@@ -42,7 +42,7 @@
 #define PCI_DEVICE_ID_INTEL_CNPLP              0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH               0xa36e
 
-#define PCI_INTEL_BXT_DSM_UUID         "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
+#define PCI_INTEL_BXT_DSM_GUID         "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
 #define PCI_INTEL_BXT_FUNC_PMU_PWR     4
 #define PCI_INTEL_BXT_STATE_D0         0
 #define PCI_INTEL_BXT_STATE_D3         3
  * struct dwc3_pci - Driver private structure
  * @dwc3: child dwc3 platform_device
  * @pci: our link to PCI bus
- * @uuid: _DSM UUID
+ * @guid: _DSM GUID
  * @has_dsm_for_pm: true for devices which need to run _DSM on runtime PM
  */
 struct dwc3_pci {
        struct platform_device *dwc3;
        struct pci_dev *pci;
 
-       u8 uuid[16];
+       guid_t guid;
 
        unsigned int has_dsm_for_pm:1;
 };
@@ -120,7 +120,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
 
                if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
                                pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) {
-                       acpi_str_to_uuid(PCI_INTEL_BXT_DSM_UUID, dwc->uuid);
+                       guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid);
                        dwc->has_dsm_for_pm = true;
                }
 
@@ -292,7 +292,7 @@ static int dwc3_pci_dsm(struct dwc3_pci *dwc, int param)
        tmp.type = ACPI_TYPE_INTEGER;
        tmp.integer.value = param;
 
-       obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), dwc->uuid,
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), &dwc->guid,
                        1, PCI_INTEL_BXT_FUNC_PMU_PWR, &argv4);
        if (!obj) {
                dev_err(&dwc->pci->dev, "failed to evaluate _DSM\n");
index 1bcf971141c09a69f3cd1674cca282bcc6ec8d46..783e6687bf4a528404523d04330d2655a0a512ce 100644 (file)
@@ -216,13 +216,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 #ifdef CONFIG_ACPI
 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
 {
-       static const u8 intel_dsm_uuid[] = {
-               0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
-               0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
-       };
+       static const guid_t intel_dsm_guid =
+               GUID_INIT(0xac340cb7, 0xe901, 0x45bf,
+                         0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23);
        union acpi_object *obj;
 
-       obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1,
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), &intel_dsm_guid, 3, 1,
                                NULL);
        ACPI_FREE(obj);
 }
index 07397bddefa3b76fc0a5e0fbd68ad9d9bb8e8ea8..81251aaa20f92043f7a3ddbf2fe6c8103407a84c 100644 (file)
@@ -55,13 +55,13 @@ struct ucsi {
 
 static int ucsi_acpi_cmd(struct ucsi *ucsi, struct ucsi_control *ctrl)
 {
-       uuid_le uuid = UUID_LE(0x6f8398c2, 0x7ca4, 0x11e4,
-                              0xad, 0x36, 0x63, 0x10, 0x42, 0xb5, 0x00, 0x8f);
+       guid_t guid = GUID_INIT(0x6f8398c2, 0x7ca4, 0x11e4,
+                               0xad, 0x36, 0x63, 0x10, 0x42, 0xb5, 0x00, 0x8f);
        union acpi_object *obj;
 
        ucsi->data->ctrl.raw_cmd = ctrl->raw_cmd;
 
-       obj = acpi_evaluate_dsm(ACPI_HANDLE(ucsi->dev), uuid.b, 1, 1, NULL);
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(ucsi->dev), &guid, 1, 1, NULL);
        if (!obj) {
                dev_err(ucsi->dev, "%s: failed to evaluate _DSM\n", __func__);
                return -EIO;
index d5a7b21fa3f198ef43598d170cd61f7494ac4826..c2ce252890277464538a6139773a4ffb7b9bf65b 100644 (file)
@@ -105,8 +105,8 @@ enum wcove_typec_role {
        WCOVE_ROLE_DEVICE,
 };
 
-static uuid_le uuid = UUID_LE(0x482383f0, 0x2876, 0x4e49,
-                             0x86, 0x85, 0xdb, 0x66, 0x21, 0x1a, 0xf0, 0x37);
+static guid_t guid = GUID_INIT(0x482383f0, 0x2876, 0x4e49,
+                              0x86, 0x85, 0xdb, 0x66, 0x21, 0x1a, 0xf0, 0x37);
 
 static int wcove_typec_func(struct wcove_typec *wcove,
                            enum wcove_typec_func func, int param)
@@ -118,7 +118,7 @@ static int wcove_typec_func(struct wcove_typec *wcove,
        tmp.type = ACPI_TYPE_INTEGER;
        tmp.integer.value = param;
 
-       obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), uuid.b, 1, func,
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), &guid, 1, func,
                                &argv4);
        if (!obj) {
                dev_err(wcove->dev, "%s: failed to evaluate _DSM\n", __func__);
@@ -314,7 +314,7 @@ static int wcove_typec_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), uuid.b, 0, 0x1f)) {
+       if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &guid, 0, 0x1f)) {
                dev_err(&pdev->dev, "Missing _DSM functions\n");
                return -ENODEV;
        }
index 4ac2ca8a76561952798f7c49bdd292719d0439a8..bf13d1ec51f3bee0c92e9f9c968c21c13bd72173 100644 (file)
@@ -233,12 +233,12 @@ static int tmem_cleancache_init_fs(size_t pagesize)
        return xen_tmem_new_pool(uuid_private, 0, pagesize);
 }
 
-static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
+static int tmem_cleancache_init_shared_fs(uuid_t *uuid, size_t pagesize)
 {
        struct tmem_pool_uuid shared_uuid;
 
-       shared_uuid.uuid_lo = *(u64 *)uuid;
-       shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
+       shared_uuid.uuid_lo = *(u64 *)&uuid->b[0];
+       shared_uuid.uuid_hi = *(u64 *)&uuid->b[8];
        return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
 }
 
index 3062cceb5c2aebcc4a15e3c52d1b26ecea82f20d..782d4d05a53ba332e2115891e343d41612cb1aa0 100644 (file)
@@ -350,7 +350,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
 {
        struct sockaddr_rxrpc srx;
        struct afs_server *server;
-       struct uuid_v1 *r;
+       struct afs_uuid *r;
        unsigned loop;
        __be32 *b;
        int ret;
@@ -380,7 +380,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
                }
 
                _debug("unmarshall UUID");
-               call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL);
+               call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
                if (!call->request)
                        return -ENOMEM;
 
@@ -453,7 +453,7 @@ static int afs_deliver_cb_probe(struct afs_call *call)
 static void SRXAFSCB_ProbeUuid(struct work_struct *work)
 {
        struct afs_call *call = container_of(work, struct afs_call, work);
-       struct uuid_v1 *r = call->request;
+       struct afs_uuid *r = call->request;
 
        struct {
                __be32  match;
@@ -476,7 +476,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
  */
 static int afs_deliver_cb_probe_uuid(struct afs_call *call)
 {
-       struct uuid_v1 *r;
+       struct afs_uuid *r;
        unsigned loop;
        __be32 *b;
        int ret;
@@ -502,15 +502,15 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
                }
 
                _debug("unmarshall UUID");
-               call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL);
+               call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
                if (!call->request)
                        return -ENOMEM;
 
                b = call->buffer;
                r = call->request;
-               r->time_low                     = b[0];
-               r->time_mid                     = htons(ntohl(b[1]));
-               r->time_hi_and_version          = htons(ntohl(b[2]));
+               r->time_low                     = ntohl(b[0]);
+               r->time_mid                     = ntohl(b[1]);
+               r->time_hi_and_version          = ntohl(b[2]);
                r->clock_seq_hi_and_reserved    = ntohl(b[3]);
                r->clock_seq_low                = ntohl(b[4]);
 
index 393672997cc23d4e5688dc77421ae81b0df95b48..4e25566066238d896b11108a7621a100a266bb40 100644 (file)
@@ -410,6 +410,15 @@ struct afs_interface {
        unsigned        mtu;            /* MTU of interface */
 };
 
+struct afs_uuid {
+       __be32          time_low;                       /* low part of timestamp */
+       __be16          time_mid;                       /* mid part of timestamp */
+       __be16          time_hi_and_version;            /* high part of timestamp and version  */
+       __u8            clock_seq_hi_and_reserved;      /* clock seq hi and variant */
+       __u8            clock_seq_low;                  /* clock seq low */
+       __u8            node[6];                        /* spatially unique node ID (MAC addr) */
+};
+
 /*****************************************************************************/
 /*
  * cache.c
@@ -544,7 +553,7 @@ extern int afs_drop_inode(struct inode *);
  * main.c
  */
 extern struct workqueue_struct *afs_wq;
-extern struct uuid_v1 afs_uuid;
+extern struct afs_uuid afs_uuid;
 
 /*
  * misc.c
index 51d7d17bca5756b9e4dbbb05408689e2d936a477..9944770849da20613af2c315d3f682f068d5f6c7 100644 (file)
@@ -31,7 +31,7 @@ static char *rootcell;
 module_param(rootcell, charp, 0);
 MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");
 
-struct uuid_v1 afs_uuid;
+struct afs_uuid afs_uuid;
 struct workqueue_struct *afs_wq;
 
 /*
index 734cbf8d9676bd6f6f26561249504ccffd9f8360..dd9f1bebb5a3a980b55e5d0fb758c93e4f694722 100644 (file)
@@ -344,7 +344,7 @@ static int autofs_dev_ioctl_fail(struct file *fp,
        int status;
 
        token = (autofs_wqt_t) param->fail.token;
-       status = param->fail.status ? param->fail.status : -ENOENT;
+       status = param->fail.status < 0 ? param->fail.status : -ENOENT;
        return autofs4_wait_release(sbi, token, status);
 }
 
index 0fd081bd2a2f5d3fb4ed18fdcb7a1371cf9f5627..fcef70602b278b48ffd74e97f10d2a57b6968cc3 100644 (file)
@@ -3271,7 +3271,7 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
        if (!is_sync_kiocb(iocb))
                ctx->iocb = iocb;
 
-       if (to->type & ITER_IOVEC)
+       if (to->type == ITER_IOVEC)
                ctx->should_dirty = true;
 
        rc = setup_aio_ctx_iter(ctx, to, READ);
index b08531977daa4084f774c75de33204b2b6fa0902..3b147dc6af6344ee5c5e616466403f2cc211dbcb 100644 (file)
@@ -810,7 +810,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 
        if (!pages) {
                pages = vmalloc(max_pages * sizeof(struct page *));
-               if (!bv) {
+               if (!pages) {
                        kvfree(bv);
                        return -ENOMEM;
                }
index 27bc360c7ffd7e1081f907c5f080dc4ba439fbfc..a723df3e01978cdca30afbf107772898ce66f33a 100644 (file)
@@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
                     struct cifs_fid *fid, __u16 search_flags,
                     struct cifs_search_info *srch_inf)
 {
-       return CIFSFindFirst(xid, tcon, path, cifs_sb,
-                            &fid->netfid, search_flags, srch_inf, true);
+       int rc;
+
+       rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
+                          &fid->netfid, search_flags, srch_inf, true);
+       if (rc)
+               cifs_dbg(FYI, "find first failed=%d\n", rc);
+       return rc;
 }
 
 static int
index c58691834eb2b74fa34f3fe2661ed3e211c4d22e..7e48561abd299012616428d28f256906a7c5381f 100644 (file)
@@ -982,7 +982,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
        kfree(utf16_path);
        if (rc) {
-               cifs_dbg(VFS, "open dir failed\n");
+               cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
                return rc;
        }
 
@@ -992,7 +992,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
                                  fid->volatile_fid, 0, srch_inf);
        if (rc) {
-               cifs_dbg(VFS, "query directory failed\n");
+               cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
                SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
        }
        return rc;
@@ -1809,7 +1809,8 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
 
        sg = init_sg(rqst, sign);
        if (!sg) {
-               cifs_dbg(VFS, "%s: Failed to init sg %d", __func__, rc);
+               cifs_dbg(VFS, "%s: Failed to init sg", __func__);
+               rc = -ENOMEM;
                goto free_req;
        }
 
@@ -1817,6 +1818,7 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
        iv = kzalloc(iv_len, GFP_KERNEL);
        if (!iv) {
                cifs_dbg(VFS, "%s: Failed to alloc IV", __func__);
+               rc = -ENOMEM;
                goto free_sg;
        }
        iv[0] = 3;
index 3cb5c9e2d4e78f641549818fbbad7681b193854d..de50e749ff058d79c67f7462962614c8c835ecdb 100644 (file)
@@ -188,8 +188,6 @@ static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode,
        pcreatetime = (__u64 *)value;
        *pcreatetime = CIFS_I(inode)->createtime;
        return sizeof(__u64);
-
-       return rc;
 }
 
 
index 2a6889b3585f068c73091d8895639b7e941d702a..9187f3b07f3e7f7b8546724d83dd06f4d16e7d8b 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -859,6 +859,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
                        if (ret < 0)
                                goto out;
                }
+               start_index = indices[pvec.nr - 1] + 1;
        }
 out:
        put_dax(dax_dev);
index 72934df6847150ba50dfbadad78fe10e01d2eadd..904199086490d5fdf05d0eda850d04a3ce572fa5 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -220,8 +220,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
        if (write) {
                unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+               unsigned long ptr_size;
                struct rlimit *rlim;
 
+               /*
+                * Since the stack will hold pointers to the strings, we
+                * must account for them as well.
+                *
+                * The size calculation is the entire vma while each arg page is
+                * built, so each time we get here it's calculating how far it
+                * is currently (rather than each call being just the newly
+                * added size from the arg page).  As a result, we need to
+                * always add the entire size of the pointers, so that on the
+                * last call to get_arg_page() we'll actually have the entire
+                * correct size.
+                */
+               ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+               if (ptr_size > ULONG_MAX - size)
+                       goto fail;
+               size += ptr_size;
+
                acct_arg_size(bprm, size / PAGE_SIZE);
 
                /*
@@ -239,13 +257,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                 *    to work from.
                 */
                rlim = current->signal->rlim;
-               if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
-                       put_page(page);
-                       return NULL;
-               }
+               if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+                       goto fail;
        }
 
        return page;
+
+fail:
+       put_page(page);
+       return NULL;
 }
 
 static void put_arg_page(struct page *page)
index d37c81f327e790cc4a309fd0a12baef0dc2ff4ac..9006cb5857b802e301fa5923feafb8a3e87db884 100644 (file)
@@ -3950,7 +3950,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                sb->s_qcop = &ext4_qctl_operations;
        sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
 #endif
-       memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
+       memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
 
        INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
        mutex_init(&sbi->s_orphan_lock);
index 83355ec4a92cdeb86d4be8d4630e01e7b0c96a28..0b89b0b7b9f75701e3a5f85680761181ce64f3df 100644 (file)
@@ -1937,7 +1937,7 @@ try_onemore:
        sb->s_time_gran = 1;
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
-       memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
+       memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
 
        /* init f2fs-specific super block info */
        sbi->valid_super_block = valid_super_block;
index ed67548b286ccc58d84732d00af5ca281772bd39..b92135c202c25cc409812d095ac5b7ea4936d5dc 100644 (file)
@@ -203,7 +203,7 @@ static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
 
        memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
        memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
-       memcpy(s->s_uuid, str->sb_uuid, 16);
+       memcpy(&s->s_uuid, str->sb_uuid, 16);
 }
 
 /**
index 7a515345610c28dc4b35e8aa15f4b5061754c060..e77bc52b468f24b407bb19a4783959ce981be163 100644 (file)
@@ -71,25 +71,14 @@ static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
        return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
 }
 
-static int gfs2_uuid_valid(const u8 *uuid)
-{
-       int i;
-
-       for (i = 0; i < 16; i++) {
-               if (uuid[i])
-                       return 1;
-       }
-       return 0;
-}
-
 static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
 {
        struct super_block *s = sdp->sd_vfs;
-       const u8 *uuid = s->s_uuid;
+
        buf[0] = '\0';
-       if (!gfs2_uuid_valid(uuid))
+       if (uuid_is_null(&s->s_uuid))
                return 0;
-       return snprintf(buf, PAGE_SIZE, "%pUB\n", uuid);
+       return snprintf(buf, PAGE_SIZE, "%pUB\n", &s->s_uuid);
 }
 
 static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
@@ -712,14 +701,13 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
 {
        struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
        struct super_block *s = sdp->sd_vfs;
-       const u8 *uuid = s->s_uuid;
 
        add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
        add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
        if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
                add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
-       if (gfs2_uuid_valid(uuid))
-               add_uevent_var(env, "UUID=%pUB", uuid);
+       if (!uuid_is_null(&s->s_uuid))
+               add_uevent_var(env, "UUID=%pUB", &s->s_uuid);
        return 0;
 }
 
index e71f11b1a180c4c0ff0d3ea30d21b568e3c11511..3bc08c394a3f9525620b256466f4222a9133080e 100644 (file)
@@ -486,7 +486,7 @@ secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { return 0; }
 #endif
 
 static inline int
-uuid_parse(char **mesg, char *buf, unsigned char **puuid)
+nfsd_uuid_parse(char **mesg, char *buf, unsigned char **puuid)
 {
        int len;
 
@@ -586,7 +586,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
                        if (strcmp(buf, "fsloc") == 0)
                                err = fsloc_parse(&mesg, buf, &exp.ex_fslocs);
                        else if (strcmp(buf, "uuid") == 0)
-                               err = uuid_parse(&mesg, buf, &exp.ex_uuid);
+                               err = nfsd_uuid_parse(&mesg, buf, &exp.ex_uuid);
                        else if (strcmp(buf, "secinfo") == 0)
                                err = secinfo_parse(&mesg, buf, &exp);
                        else
index 3b7c937a36b528e67511a23b136215ffaab6d8e4..4689940a953c2f7fc3b3a0e07a7d01c8c09e7d9a 100644 (file)
@@ -2591,6 +2591,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
        struct ocfs2_lock_res *lockres;
 
        lockres = &OCFS2_I(inode)->ip_inode_lockres;
+       /* had_lock means that the current process already takes the cluster
+        * lock previously. If had_lock is 1, we have nothing to do here, and
+        * it will get unlocked where we got the lock.
+        */
        if (!had_lock) {
                ocfs2_remove_holder(lockres, oh);
                ocfs2_inode_unlock(inode, ex);
index ca1646fbcaefe7daf0e205620ca3344a4cf2a44a..83005f486451eaaa211614f22247fb885f24d229 100644 (file)
@@ -2062,7 +2062,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
        cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits);
        bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
        sb->s_maxbytes = ocfs2_max_file_offset(bbits, cbits);
-       memcpy(sb->s_uuid, di->id2.i_super.s_uuid,
+       memcpy(&sb->s_uuid, di->id2.i_super.s_uuid,
               sizeof(di->id2.i_super.s_uuid));
 
        osb->osb_dx_mask = (1 << (cbits - bbits)) - 1;
index 3c5384d9b3a549f319b114a782c7daab966a2c28..f70c3778d600c6be63996572bee6fd46ac03440c 100644 (file)
@@ -1328,20 +1328,21 @@ static int ocfs2_xattr_get(struct inode *inode,
                           void *buffer,
                           size_t buffer_size)
 {
-       int ret;
+       int ret, had_lock;
        struct buffer_head *di_bh = NULL;
+       struct ocfs2_lock_holder oh;
 
-       ret = ocfs2_inode_lock(inode, &di_bh, 0);
-       if (ret < 0) {
-               mlog_errno(ret);
-               return ret;
+       had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
+       if (had_lock < 0) {
+               mlog_errno(had_lock);
+               return had_lock;
        }
        down_read(&OCFS2_I(inode)->ip_xattr_sem);
        ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
                                     name, buffer, buffer_size);
        up_read(&OCFS2_I(inode)->ip_xattr_sem);
 
-       ocfs2_inode_unlock(inode, 0);
+       ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
 
        brelse(di_bh);
 
@@ -3537,11 +3538,12 @@ int ocfs2_xattr_set(struct inode *inode,
 {
        struct buffer_head *di_bh = NULL;
        struct ocfs2_dinode *di;
-       int ret, credits, ref_meta = 0, ref_credits = 0;
+       int ret, credits, had_lock, ref_meta = 0, ref_credits = 0;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct inode *tl_inode = osb->osb_tl_inode;
        struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
        struct ocfs2_refcount_tree *ref_tree = NULL;
+       struct ocfs2_lock_holder oh;
 
        struct ocfs2_xattr_info xi = {
                .xi_name_index = name_index,
@@ -3572,8 +3574,9 @@ int ocfs2_xattr_set(struct inode *inode,
                return -ENOMEM;
        }
 
-       ret = ocfs2_inode_lock(inode, &di_bh, 1);
-       if (ret < 0) {
+       had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
+       if (had_lock < 0) {
+               ret = had_lock;
                mlog_errno(ret);
                goto cleanup_nolock;
        }
@@ -3670,7 +3673,7 @@ cleanup:
                if (ret)
                        mlog_errno(ret);
        }
-       ocfs2_inode_unlock(inode, 1);
+       ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
 cleanup_nolock:
        brelse(di_bh);
        brelse(xbs.xattr_bh);
index 7a44533f4bbf24134a95bdc030bde5779f28457a..33fe6ca929f726adb912814985e534af3e3fa159 100644 (file)
@@ -233,7 +233,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
        return err;
 }
 
-static struct ovl_fh *ovl_encode_fh(struct dentry *lower, uuid_be *uuid)
+static struct ovl_fh *ovl_encode_fh(struct dentry *lower, uuid_t *uuid)
 {
        struct ovl_fh *fh;
        int fh_type, fh_len, dwords;
@@ -284,7 +284,6 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
                          struct dentry *upper)
 {
        struct super_block *sb = lower->d_sb;
-       uuid_be *uuid = (uuid_be *) &sb->s_uuid;
        const struct ovl_fh *fh = NULL;
        int err;
 
@@ -294,8 +293,8 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
         * up and a pure upper inode.
         */
        if (sb->s_export_op && sb->s_export_op->fh_to_dentry &&
-           uuid_be_cmp(*uuid, NULL_UUID_BE)) {
-               fh = ovl_encode_fh(lower, uuid);
+           !uuid_is_null(&sb->s_uuid)) {
+               fh = ovl_encode_fh(lower, &sb->s_uuid);
                if (IS_ERR(fh))
                        return PTR_ERR(fh);
        }
index f3136c31e72af24cbb9949449a12d292fc3bf11b..de0d4f742f36eb67ce84456be47bbef977fe1320 100644 (file)
@@ -135,7 +135,7 @@ static struct dentry *ovl_get_origin(struct dentry *dentry,
         * Make sure that the stored uuid matches the uuid of the lower
         * layer where file handle will be decoded.
         */
-       if (uuid_be_cmp(fh->uuid, *(uuid_be *) &mnt->mnt_sb->s_uuid))
+       if (!uuid_equal(&fh->uuid, &mnt->mnt_sb->s_uuid))
                goto out;
 
        origin = exportfs_decode_fh(mnt, (struct fid *)fh->fid,
index 0623cebeefff8661d49d65a228ceec6290cee877..10863b4105fa21d89f9fdc560a46287f7ffe2f44 100644 (file)
@@ -56,7 +56,7 @@ struct ovl_fh {
        u8 len;         /* size of this header + size of fid */
        u8 flags;       /* OVL_FH_FLAG_* */
        u8 type;        /* fid_type of fid */
-       uuid_be uuid;   /* uuid of filesystem */
+       uuid_t uuid;    /* uuid of filesystem */
        u8 fid[0];      /* file identifier */
 } __packed;
 
index 0315fea1d589e104ac4f10bf4db3f9f705c3f7ad..f80be4c5df9d13b7009602dd5be2f4c58a5de1ac 100644 (file)
@@ -455,24 +455,14 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
        /*
         * allocate new block and move data
         */
-       switch (fs32_to_cpu(sb, usb1->fs_optim)) {
-           case UFS_OPTSPACE:
+       if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
                request = newcount;
-               if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
-                   > uspi->s_dsize * uspi->s_minfree / (2 * 100))
-                       break;
-               usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-               break;
-           default:
-               usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-       
-           case UFS_OPTTIME:
+               if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
+                       usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
+       } else {
                request = uspi->s_fpb;
-               if (uspi->cs_total.cs_nffree < uspi->s_dsize *
-                   (uspi->s_minfree - 2) / 100)
-                       break;
-               usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-               break;
+               if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
+                       usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
        }
        result = ufs_alloc_fragments (inode, cgno, goal, request, err);
        if (result) {
index 9f4590261134085cbabcea1ee9a72382239ba479..f36d6a53687d13fd817f65f99913a54d425e8048 100644 (file)
@@ -566,10 +566,8 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
        set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
-       if (inode->i_nlink == 0) {
-               ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
-               return -1;
-       }
+       if (inode->i_nlink == 0)
+               return -ESTALE;
 
        /*
         * Linux now has 32-bit uid and gid, so we can support EFT.
@@ -578,9 +576,9 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
        i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
 
        inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
-       inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
-       inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
-       inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
+       inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
+       inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
+       inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
        inode->i_mtime.tv_nsec = 0;
        inode->i_atime.tv_nsec = 0;
        inode->i_ctime.tv_nsec = 0;
@@ -614,10 +612,8 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
        set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
-       if (inode->i_nlink == 0) {
-               ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
-               return -1;
-       }
+       if (inode->i_nlink == 0)
+               return -ESTALE;
 
         /*
          * Linux now has 32-bit uid and gid, so we can support EFT.
@@ -657,7 +653,7 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head * bh;
        struct inode *inode;
-       int err;
+       int err = -EIO;
 
        UFSD("ENTER, ino %lu\n", ino);
 
@@ -692,9 +688,10 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
                err = ufs1_read_inode(inode,
                                      ufs_inode + ufs_inotofsbo(inode->i_ino));
        }
-
+       brelse(bh);
        if (err)
                goto bad_inode;
+
        inode->i_version++;
        ufsi->i_lastfrag =
                (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
@@ -703,15 +700,13 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 
        ufs_set_inode_ops(inode);
 
-       brelse(bh);
-
        UFSD("EXIT\n");
        unlock_new_inode(inode);
        return inode;
 
 bad_inode:
        iget_failed(inode);
-       return ERR_PTR(-EIO);
+       return ERR_PTR(err);
 }
 
 static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
index d5300adbfd79bafd05217cd7088d1221a4501187..0a4f58a5073cb63feb94c2174c56d9889d2a37a4 100644 (file)
@@ -1210,6 +1210,15 @@ magic_found:
 
        uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize,
                                              uspi->s_minfree, 100);
+       if (uspi->s_minfree <= 5) {
+               uspi->s_time_to_space = ~0ULL;
+               uspi->s_space_to_time = 0;
+               usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
+       } else {
+               uspi->s_time_to_space = (uspi->s_root_blocks / 2) + 1;
+               uspi->s_space_to_time = mul_u64_u32_div(uspi->s_dsize,
+                                             uspi->s_minfree - 2, 100) - 1;
+       }
 
        /*
         * Compute another frequently used values
index 823d55a37586037f7ed02f9be5e1dbee890123d4..150eef6f12331a034dab0c48bee530fdc73c821e 100644 (file)
@@ -792,6 +792,8 @@ struct ufs_sb_private_info {
        __s32   fs_magic;       /* filesystem magic */
        unsigned int s_dirblksize;
        __u64   s_root_blocks;
+       __u64   s_time_to_space;
+       __u64   s_space_to_time;
 };
 
 /*
index 5c90f82b8f6b88d4f93a3f7f33b3c0e1b8ec4b20..a6e955bfead852d3cd5a42d6a0785d497f8d8e4e 100644 (file)
@@ -98,8 +98,7 @@ xfs-y                         += xfs_aops.o \
                                   xfs_sysfs.o \
                                   xfs_trans.o \
                                   xfs_xattr.o \
-                                  kmem.o \
-                                  uuid.o
+                                  kmem.o
 
 # low-level transaction/log code
 xfs-y                          += xfs_log.o \
diff --git a/fs/xfs/uuid.c b/fs/xfs/uuid.c
deleted file mode 100644 (file)
index b83f76b..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#include <xfs.h>
-
-/* IRIX interpretation of an uuid_t */
-typedef struct {
-       __be32  uu_timelow;
-       __be16  uu_timemid;
-       __be16  uu_timehi;
-       __be16  uu_clockseq;
-       __be16  uu_node[3];
-} xfs_uu_t;
-
-/*
- * uuid_getnodeuniq - obtain the node unique fields of a UUID.
- *
- * This is not in any way a standard or condoned UUID function;
- * it just something that's needed for user-level file handles.
- */
-void
-uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
-{
-       xfs_uu_t *uup = (xfs_uu_t *)uuid;
-
-       fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) |
-                  be16_to_cpu(uup->uu_timemid);
-       fsid[1] = be32_to_cpu(uup->uu_timelow);
-}
-
-int
-uuid_is_nil(uuid_t *uuid)
-{
-       int     i;
-       char    *cp = (char *)uuid;
-
-       if (uuid == NULL)
-               return 0;
-       /* implied check of version number here... */
-       for (i = 0; i < sizeof *uuid; i++)
-               if (*cp++) return 0;    /* not nil */
-       return 1;       /* is nil */
-}
-
-int
-uuid_equal(uuid_t *uuid1, uuid_t *uuid2)
-{
-       return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1;
-}
diff --git a/fs/xfs/uuid.h b/fs/xfs/uuid.h
deleted file mode 100644 (file)
index 104db0f..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef __XFS_SUPPORT_UUID_H__
-#define __XFS_SUPPORT_UUID_H__
-
-typedef struct {
-       unsigned char   __u_bits[16];
-} uuid_t;
-
-extern int uuid_is_nil(uuid_t *uuid);
-extern int uuid_equal(uuid_t *uuid1, uuid_t *uuid2);
-extern void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]);
-
-static inline void
-uuid_copy(uuid_t *dst, uuid_t *src)
-{
-       memcpy(dst, src, sizeof(uuid_t));
-}
-
-#endif /* __XFS_SUPPORT_UUID_H__ */
index 09af0f7cd55e278312881999755d3d8d0793d5c8..3b91faacc1baeaff2ac762e2438e5fcbe48cc76b 100644 (file)
@@ -1316,9 +1316,12 @@ xfs_vm_bmap(
         * The swap code (ab-)uses ->bmap to get a block mapping and then
         * bypasses the file system for actual I/O.  We really can't allow
         * that on reflinks inodes, so we have to skip out here.  And yes,
-        * 0 is the magic code for a bmap error..
+        * 0 is the magic code for a bmap error.
+        *
+        * Since we don't pass back blockdev info, we can't return bmap
+        * information for rt files either.
         */
-       if (xfs_is_reflink_inode(ip))
+       if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
                return 0;
 
        filemap_write_and_wait(mapping);
index 08cb7d1a4a3a40ebc5f456c3767d998f433c8ed4..013cc78d7daf46ce69427dd05830be65182d71da 100644 (file)
@@ -834,9 +834,7 @@ xfs_inode_item_format_convert(
                in_f->ilf_dsize = in_f32->ilf_dsize;
                in_f->ilf_ino = in_f32->ilf_ino;
                /* copy biggest field of ilf_u */
-               memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
-                      in_f32->ilf_u.ilfu_uuid.__u_bits,
-                      sizeof(uuid_t));
+               uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
                in_f->ilf_blkno = in_f32->ilf_blkno;
                in_f->ilf_len = in_f32->ilf_len;
                in_f->ilf_boffset = in_f32->ilf_boffset;
@@ -851,9 +849,7 @@ xfs_inode_item_format_convert(
                in_f->ilf_dsize = in_f64->ilf_dsize;
                in_f->ilf_ino = in_f64->ilf_ino;
                /* copy biggest field of ilf_u */
-               memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
-                      in_f64->ilf_u.ilfu_uuid.__u_bits,
-                      sizeof(uuid_t));
+               uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid);
                in_f->ilf_blkno = in_f64->ilf_blkno;
                in_f->ilf_len = in_f64->ilf_len;
                in_f->ilf_boffset = in_f64->ilf_boffset;
index 044fb0e15390c7c5538514ec2f49b72fa5942ade..2d167fe643ece8a3a4bf1a9a6c0a83d7751c82fd 100644 (file)
@@ -19,6 +19,7 @@
 #define __XFS_LINUX__
 
 #include <linux/types.h>
+#include <linux/uuid.h>
 
 /*
  * Kernel specific type declarations for XFS
@@ -42,7 +43,6 @@ typedef __u32                 xfs_nlink_t;
 
 #include "kmem.h"
 #include "mrlock.h"
-#include "uuid.h"
 
 #include <linux/semaphore.h>
 #include <linux/mm.h>
index cd0b077deb354b6de5dda287cffaeb351e0e8cce..8cec1e5505a4bf921f953ee285a3c0f2b2e8516e 100644 (file)
@@ -352,13 +352,13 @@ xlog_header_check_mount(
 {
        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
 
-       if (uuid_is_nil(&head->h_fs_uuid)) {
+       if (uuid_is_null(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
-                * h_fs_uuid is nil, we assume this log was last mounted
+                * h_fs_uuid is null, we assume this log was last mounted
                 * by IRIX and continue.
                 */
-               xfs_warn(mp, "nil uuid in log - IRIX style log");
+               xfs_warn(mp, "null uuid in log - IRIX style log");
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xfs_warn(mp, "log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
index 2eaf8185916610ee115ebd98f8869f2df59b5fe9..d249546da15ef0b2e847f8a2a6c354c94d4600b6 100644 (file)
@@ -74,20 +74,19 @@ xfs_uuid_mount(
        int                     hole, i;
 
        /* Publish UUID in struct super_block */
-       BUILD_BUG_ON(sizeof(mp->m_super->s_uuid) != sizeof(uuid_t));
-       memcpy(&mp->m_super->s_uuid, uuid, sizeof(uuid_t));
+       uuid_copy(&mp->m_super->s_uuid, uuid);
 
        if (mp->m_flags & XFS_MOUNT_NOUUID)
                return 0;
 
-       if (uuid_is_nil(uuid)) {
-               xfs_warn(mp, "Filesystem has nil UUID - can't mount");
+       if (uuid_is_null(uuid)) {
+               xfs_warn(mp, "Filesystem has null UUID - can't mount");
                return -EINVAL;
        }
 
        mutex_lock(&xfs_uuid_table_mutex);
        for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
-               if (uuid_is_nil(&xfs_uuid_table[i])) {
+               if (uuid_is_null(&xfs_uuid_table[i])) {
                        hole = i;
                        continue;
                }
@@ -124,7 +123,7 @@ xfs_uuid_unmount(
 
        mutex_lock(&xfs_uuid_table_mutex);
        for (i = 0; i < xfs_uuid_table_size; i++) {
-               if (uuid_is_nil(&xfs_uuid_table[i]))
+               if (uuid_is_null(&xfs_uuid_table[i]))
                        continue;
                if (!uuid_equal(uuid, &xfs_uuid_table[i]))
                        continue;
@@ -793,7 +792,10 @@ xfs_mountfs(
         *  Copies the low order bits of the timestamp and the randomly
         *  set "sequence" number out of a UUID.
         */
-       uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);
+       mp->m_fixedfsid[0] =
+               (get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
+                get_unaligned_be16(&sbp->sb_uuid.b[4]);
+       mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);
 
        mp->m_dmevmask = 0;     /* not persistent; set after each mount */
 
index 197f3fffc9a7151ed61d0b960f5e452f6beccb5c..c1b163cb68b12d1fa48cdae119596378b7627fdf 100644 (file)
@@ -61,17 +61,18 @@ bool acpi_ata_match(acpi_handle handle);
 bool acpi_bay_match(acpi_handle handle);
 bool acpi_dock_match(acpi_handle handle);
 
-bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs);
-union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
+bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs);
+union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
                        u64 rev, u64 func, union acpi_object *argv4);
 
 static inline union acpi_object *
-acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
-                       union acpi_object *argv4, acpi_object_type type)
+acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
+                       u64 func, union acpi_object *argv4,
+                       acpi_object_type type)
 {
        union acpi_object *obj;
 
-       obj = acpi_evaluate_dsm(handle, uuid, rev, func, argv4);
+       obj = acpi_evaluate_dsm(handle, guid, rev, func, argv4);
        if (obj && obj->type != type) {
                ACPI_FREE(obj);
                obj = NULL;
@@ -210,7 +211,8 @@ struct acpi_device_flags {
        u32 of_compatible_ok:1;
        u32 coherent_dma:1;
        u32 cca_seen:1;
-       u32 reserved:20;
+       u32 spi_i2c_slave:1;
+       u32 reserved:19;
 };
 
 /* File System */
index 370c0a0473fcb80948decaeba03d092f1c24ca74..d66432c6e6759730fff0a705edacb88be94fd40b 100644 (file)
@@ -43,6 +43,8 @@
 #ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_
 #define _DT_BINDINGS_CLK_SUN50I_A64_H_
 
+#define CLK_PLL_PERIPH0                11
+
 #define CLK_BUS_MIPI_DSI       28
 #define CLK_BUS_CE             29
 #define CLK_BUS_DMA            30
index c2afc41d69644af3d9f920a56341d62003630883..e139fe5c62ecd5c8375e7bc8fd503465e475985b 100644 (file)
@@ -43,6 +43,8 @@
 #ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_
 #define _DT_BINDINGS_CLK_SUN8I_H3_H_
 
+#define CLK_PLL_PERIPH0                9
+
 #define CLK_CPUX               14
 
 #define CLK_BUS_CE             20
index 137e4a3d89c5225dc6a9535265f13e874bdbb8eb..cafdfb84ca28e0bb0b7096532f349472523262e2 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/resource_ext.h>
 #include <linux/device.h>
 #include <linux/property.h>
+#include <linux/uuid.h>
 
 #ifndef _LINUX
 #define _LINUX
@@ -457,7 +458,6 @@ struct acpi_osc_context {
        struct acpi_buffer ret;         /* free by caller if success */
 };
 
-acpi_status acpi_str_to_uuid(char *str, u8 *uuid);
 acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
 
 /* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
@@ -741,7 +741,7 @@ static inline bool acpi_driver_match_device(struct device *dev,
 }
 
 static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
-                                                  const u8 *uuid,
+                                                  const guid_t *guid,
                                                   int rev, int func,
                                                   union acpi_object *argv4)
 {
index b74a3edcb3da82903568981a5b49fbbf1f4269cb..1ddd36bd2173b98e925eabdf083a796bfcabdd07 100644 (file)
@@ -391,6 +391,8 @@ struct request_queue {
        int                     nr_rqs[2];      /* # allocated [a]sync rqs */
        int                     nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
 
+       atomic_t                shared_hctx_restart;
+
        struct blk_queue_stats  *stats;
        struct rq_wb            *rq_wb;
 
index fccf7f44139dd67c2bae6db1799e2517bcd9cd00..bbb3712dd8923feca7e8239cf044ef5ef041d7c0 100644 (file)
@@ -27,7 +27,7 @@ struct cleancache_filekey {
 
 struct cleancache_ops {
        int (*init_fs)(size_t);
-       int (*init_shared_fs)(char *uuid, size_t);
+       int (*init_shared_fs)(uuid_t *uuid, size_t);
        int (*get_page)(int, struct cleancache_filekey,
                        pgoff_t, struct page *);
        void (*put_page)(int, struct cleancache_filekey,
index 803e5a9b265422d2c2034678331b6d4dd353d1de..3e68cabb8457e5a9ae5b53bd9d7aa0f844ee99fa 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/percpu-rwsem.h>
 #include <linux/workqueue.h>
 #include <linux/delayed_call.h>
+#include <linux/uuid.h>
 
 #include <asm/byteorder.h>
 #include <uapi/linux/fs.h>
@@ -1328,8 +1329,8 @@ struct super_block {
 
        struct sb_writers       s_writers;
 
-       char s_id[32];                          /* Informational name */
-       u8 s_uuid[16];                          /* UUID */
+       char                    s_id[32];       /* Informational name */
+       uuid_t                  s_uuid;         /* UUID */
 
        void                    *s_fs_info;     /* Filesystem private info */
        unsigned int            s_max_links;
index acff9437e5c3776e48a9357af40291165719416d..e619fae2f0375e073e5f3216fd764676ae1340b8 100644 (file)
@@ -219,12 +219,6 @@ static inline struct gendisk *part_to_disk(struct hd_struct *part)
        return NULL;
 }
 
-static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to)
-{
-       uuid_be_to_bin(uuid_str, (uuid_be *)to);
-       return 0;
-}
-
 static inline int disk_max_parts(struct gendisk *disk)
 {
        if (disk->flags & GENHD_FL_EXT_DEVT)
@@ -736,11 +730,6 @@ static inline dev_t blk_lookup_devt(const char *name, int partno)
        dev_t devt = MKDEV(0, 0);
        return devt;
 }
-
-static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to)
-{
-       return -EINVAL;
-}
 #endif /* CONFIG_BLOCK */
 
 #endif /* _LINUX_GENHD_H */
index e997c4a49a8884e3b1167a830e7fdf0431365a3d..bc711a10be05c2d8354a9ba2eff382bc992314b1 100644 (file)
@@ -177,7 +177,6 @@ struct fcnvme_lsdesc_rjt {
 };
 
 
-#define FCNVME_ASSOC_HOSTID_LEN                16
 #define FCNVME_ASSOC_HOSTNQN_LEN       256
 #define FCNVME_ASSOC_SUBNQN_LEN                256
 
@@ -191,7 +190,7 @@ struct fcnvme_lsdesc_cr_assoc_cmd {
        __be16  cntlid;
        __be16  sqsize;
        __be32  rsvd52;
-       u8      hostid[FCNVME_ASSOC_HOSTID_LEN];
+       uuid_t  hostid;
        u8      hostnqn[FCNVME_ASSOC_HOSTNQN_LEN];
        u8      subnqn[FCNVME_ASSOC_SUBNQN_LEN];
        u8      rsvd632[384];
index b625bacf37efaabd84e8735b2b304401cdb195fd..e400a69fa1d324acf3a2bce7ba25d6c24054b449 100644 (file)
@@ -16,6 +16,7 @@
 #define _LINUX_NVME_H
 
 #include <linux/types.h>
+#include <linux/uuid.h>
 
 /* NQN names in commands fields specified one size */
 #define NVMF_NQN_FIELD_LEN     256
@@ -843,7 +844,7 @@ struct nvmf_connect_command {
 };
 
 struct nvmf_connect_data {
-       __u8            hostid[16];
+       uuid_t          hostid;
        __le16          cntlid;
        char            resv4[238];
        char            subsysnqn[NVMF_NQN_FIELD_LEN];
index 7a4e83a8c89ca8d960c75b6002aeff547fa2890e..dd86c97f2454b25746552c26e0e985363c18d352 100644 (file)
@@ -105,7 +105,7 @@ static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
 static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { }
 #endif
 
-extern const u8 pci_acpi_dsm_uuid[];
+extern const guid_t pci_acpi_dsm_guid;
 #define DEVICE_LABEL_DSM       0x07
 #define RESET_DELAY_DSM                0x08
 #define FUNCTION_DELAY_DSM     0x09
index 07ef550c662708035459293fe39fa895cca9fce5..93315d6b21a85fea729970574eecd66027f8f520 100644 (file)
@@ -84,6 +84,7 @@ struct kmem_cache {
        int red_left_pad;       /* Left redzone padding size */
 #ifdef CONFIG_SYSFS
        struct kobject kobj;    /* For sysfs */
+       struct work_struct kobj_remove_work;
 #endif
 #ifdef CONFIG_MEMCG
        struct memcg_cache_params memcg_params;
index 110f4532188c7b6c50cf1ce4dcd5dc525beae9a1..f7043ccca81cc65f15c252158e745dc4c2948155 100644 (file)
@@ -29,7 +29,6 @@
  */
 struct tk_read_base {
        struct clocksource      *clock;
-       u64                     (*read)(struct clocksource *cs);
        u64                     mask;
        u64                     cycle_last;
        u32                     mult;
@@ -58,7 +57,7 @@ struct tk_read_base {
  *                     interval.
  * @xtime_remainder:   Shifted nano seconds left over when rounding
  *                     @cycle_interval
- * @raw_interval:      Raw nano seconds accumulated per NTP interval.
+ * @raw_interval:      Shifted raw nano seconds accumulated per NTP interval.
  * @ntp_error:         Difference between accumulated time and NTP time in ntp
  *                     shifted nano seconds.
  * @ntp_error_shift:   Shift conversion between clock shifted nano seconds and
@@ -100,7 +99,7 @@ struct timekeeper {
        u64                     cycle_interval;
        u64                     xtime_interval;
        s64                     xtime_remainder;
-       u32                     raw_interval;
+       u64                     raw_interval;
        /* The ntp_tick_length() value currently being used.
         * This cached copy ensures we consistently apply the tick
         * length for an entire tick, as ntp_tick_length may change
index 4dff73a8975836dab9e49ce57ddcb1d6a2e79782..75f7182d5360bf5b58688ea349b54c218679ff58 100644 (file)
 
 #include <uapi/linux/uuid.h>
 
-/*
- * V1 (time-based) UUID definition [RFC 4122].
- * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns
- *   increments since midnight 15th October 1582
- *   - add AFS_UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID
- *     time
- * - the clock sequence is a 14-bit counter to avoid duplicate times
- */
-struct uuid_v1 {
-       __be32          time_low;                       /* low part of timestamp */
-       __be16          time_mid;                       /* mid part of timestamp */
-       __be16          time_hi_and_version;            /* high part of timestamp and version  */
-#define UUID_TO_UNIX_TIME      0x01b21dd213814000ULL
-#define UUID_TIMEHI_MASK       0x0fff
-#define UUID_VERSION_TIME      0x1000  /* time-based UUID */
-#define UUID_VERSION_NAME      0x3000  /* name-based UUID */
-#define UUID_VERSION_RANDOM    0x4000  /* (pseudo-)random generated UUID */
-       u8              clock_seq_hi_and_reserved;      /* clock seq hi and variant */
-#define UUID_CLOCKHI_MASK      0x3f
-#define UUID_VARIANT_STD       0x80
-       u8              clock_seq_low;                  /* clock seq low */
-       u8              node[6];                        /* spatially unique node ID (MAC addr) */
-};
+typedef struct {
+       __u8 b[16];
+} uuid_t;
+
+#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)                     \
+((uuid_t)                                                              \
+{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
+   ((b) >> 8) & 0xff, (b) & 0xff,                                      \
+   ((c) >> 8) & 0xff, (c) & 0xff,                                      \
+   (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
 
 /*
  * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
@@ -48,27 +35,73 @@ struct uuid_v1 {
  */
 #define        UUID_STRING_LEN         36
 
-static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
+extern const guid_t guid_null;
+extern const uuid_t uuid_null;
+
+static inline bool guid_equal(const guid_t *u1, const guid_t *u2)
+{
+       return memcmp(u1, u2, sizeof(guid_t)) == 0;
+}
+
+static inline void guid_copy(guid_t *dst, const guid_t *src)
+{
+       memcpy(dst, src, sizeof(guid_t));
+}
+
+static inline bool guid_is_null(guid_t *guid)
+{
+       return guid_equal(guid, &guid_null);
+}
+
+static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2)
+{
+       return memcmp(u1, u2, sizeof(uuid_t)) == 0;
+}
+
+static inline void uuid_copy(uuid_t *dst, const uuid_t *src)
 {
-       return memcmp(&u1, &u2, sizeof(uuid_le));
+       memcpy(dst, src, sizeof(uuid_t));
 }
 
-static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2)
+static inline bool uuid_is_null(uuid_t *uuid)
 {
-       return memcmp(&u1, &u2, sizeof(uuid_be));
+       return uuid_equal(uuid, &uuid_null);
 }
 
 void generate_random_uuid(unsigned char uuid[16]);
 
-extern void uuid_le_gen(uuid_le *u);
-extern void uuid_be_gen(uuid_be *u);
+extern void guid_gen(guid_t *u);
+extern void uuid_gen(uuid_t *u);
 
 bool __must_check uuid_is_valid(const char *uuid);
 
-extern const u8 uuid_le_index[16];
-extern const u8 uuid_be_index[16];
+extern const u8 guid_index[16];
+extern const u8 uuid_index[16];
+
+int guid_parse(const char *uuid, guid_t *u);
+int uuid_parse(const char *uuid, uuid_t *u);
+
+/* backwards compatibility, don't use in new code */
+typedef uuid_t uuid_be;
+#define UUID_BE(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+       UUID_INIT(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7)
+#define NULL_UUID_BE                                                   \
+       UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00,     \
+            0x00, 0x00, 0x00, 0x00)
 
-int uuid_le_to_bin(const char *uuid, uuid_le *u);
-int uuid_be_to_bin(const char *uuid, uuid_be *u);
+#define uuid_le_gen(u)         guid_gen(u)
+#define uuid_be_gen(u)         uuid_gen(u)
+#define uuid_le_to_bin(guid, u)        guid_parse(guid, u)
+#define uuid_be_to_bin(uuid, u)        uuid_parse(uuid, u)
+
+static inline int uuid_le_cmp(const guid_t u1, const guid_t u2)
+{
+       return memcmp(&u1, &u2, sizeof(guid_t));
+}
+
+static inline int uuid_be_cmp(const uuid_t u1, const uuid_t u2)
+{
+       return memcmp(&u1, &u2, sizeof(uuid_t));
+}
 
 #endif
index 345911965dbb8f53289d63535828969454ae3333..454ff763eeba9b829051862d7cce278be7d77b42 100644 (file)
@@ -6,7 +6,7 @@
 struct net;
 
 #ifdef CONFIG_WEXT_CORE
-int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
                      void __user *arg);
 int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
                             unsigned long arg);
@@ -14,7 +14,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 struct iw_statistics *get_wireless_stats(struct net_device *dev);
 int call_commit_handler(struct net_device *dev);
 #else
-static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
                                    void __user *arg)
 {
        return -EINVAL;
index 3738e5fb6a4de8c3e06946a3798d4d30f21ca82c..8ef82f433877a53fe1d4fd7cc3f3cd4e27e1a062 100644 (file)
 
 typedef struct {
        __u8 b[16];
-} uuid_le;
+} guid_t;
 
-typedef struct {
-       __u8 b[16];
-} uuid_be;
-
-#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)               \
-((uuid_le)                                                             \
+#define GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)                     \
+((guid_t)                                                              \
 {{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
    (b) & 0xff, ((b) >> 8) & 0xff,                                      \
    (c) & 0xff, ((c) >> 8) & 0xff,                                      \
    (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
 
-#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)               \
-((uuid_be)                                                             \
-{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
-   ((b) >> 8) & 0xff, (b) & 0xff,                                      \
-   ((c) >> 8) & 0xff, (c) & 0xff,                                      \
-   (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
-
+/* backwards compatibility, don't use in new code */
+typedef guid_t uuid_le;
+#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)               \
+       GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
 #define NULL_UUID_LE                                                   \
        UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00,     \
-               0x00, 0x00, 0x00, 0x00)
-
-#define NULL_UUID_BE                                                   \
-       UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00,     \
-               0x00, 0x00, 0x00, 0x00)
-
+            0x00, 0x00, 0x00, 0x00)
 
 #endif /* _UAPI_LINUX_UUID_H_ */
index 2831480c63a28b8e9b8cee1c0b30968860b3fcd0..ee97196bb1510e4f95cfd6ddc039fa9700cdc828 100644 (file)
@@ -580,7 +580,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
        int ret = -ENOMEM, max_order = 0;
 
        if (!has_aux(event))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
                /*
index f8269036bf0b84f89faa2db3b5dc5fe4c259a019..52c4e907c14b0d4a7896d7ca9a6fa6fbc17451ba 100644 (file)
@@ -59,7 +59,11 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
        ops = container_of(fops, struct klp_ops, fops);
 
-       rcu_read_lock();
+       /*
+        * A variant of synchronize_sched() is used to allow patching functions
+        * where RCU is not watching, see klp_synchronize_transition().
+        */
+       preempt_disable_notrace();
 
        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                      stack_node);
@@ -115,7 +119,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
        klp_arch_set_pc(regs, (unsigned long)func->new_func);
 unlock:
-       rcu_read_unlock();
+       preempt_enable_notrace();
 }
 
 /*
index adc0cc64aa4b6ae199e5f4a5b2af0ad59c382175..b004a1fb603236f31cd5780962c098bfcf8fe743 100644 (file)
@@ -48,6 +48,28 @@ static void klp_transition_work_fn(struct work_struct *work)
 }
 static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
 
+/*
+ * This function is just a stub to implement a hard force
+ * of synchronize_sched(). This requires synchronizing
+ * tasks even in userspace and idle.
+ */
+static void klp_sync(struct work_struct *work)
+{
+}
+
+/*
+ * We allow to patch also functions where RCU is not watching,
+ * e.g. before user_exit(). We can not rely on the RCU infrastructure
+ * to do the synchronization. Instead hard force the sched synchronization.
+ *
+ * This approach allows to use RCU functions for manipulating func_stack
+ * safely.
+ */
+static void klp_synchronize_transition(void)
+{
+       schedule_on_each_cpu(klp_sync);
+}
+
 /*
  * The transition to the target patch state is complete.  Clean up the data
  * structures.
@@ -73,7 +95,7 @@ static void klp_complete_transition(void)
                 * func->transition gets cleared, the handler may choose a
                 * removed function.
                 */
-               synchronize_rcu();
+               klp_synchronize_transition();
        }
 
        if (klp_transition_patch->immediate)
@@ -92,7 +114,7 @@ static void klp_complete_transition(void)
 
        /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
        if (klp_target_state == KLP_PATCHED)
-               synchronize_rcu();
+               klp_synchronize_transition();
 
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
@@ -136,7 +158,11 @@ void klp_cancel_transition(void)
  */
 void klp_update_patch_state(struct task_struct *task)
 {
-       rcu_read_lock();
+       /*
+        * A variant of synchronize_sched() is used to allow patching functions
+        * where RCU is not watching, see klp_synchronize_transition().
+        */
+       preempt_disable_notrace();
 
        /*
         * This test_and_clear_tsk_thread_flag() call also serves as a read
@@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task)
        if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
                task->patch_state = READ_ONCE(klp_target_state);
 
-       rcu_read_unlock();
+       preempt_enable_notrace();
 }
 
 /*
@@ -539,7 +565,7 @@ void klp_reverse_transition(void)
                clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
 
        /* Let any remaining calls to klp_update_patch_state() complete */
-       synchronize_rcu();
+       klp_synchronize_transition();
 
        klp_start_transition();
 }
index ca92bcfeb322f3f836031ec8b3ab21867f39adf5..45b4c1ffe14ef4334a918d0fc9ed407d801cafe6 100644 (file)
@@ -510,7 +510,8 @@ int unhandled_signal(struct task_struct *tsk, int sig)
        return !tsk->ptrace;
 }
 
-static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
+                          bool *resched_timer)
 {
        struct sigqueue *q, *first = NULL;
 
@@ -532,6 +533,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 still_pending:
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);
+
+               *resched_timer =
+                       (first->flags & SIGQUEUE_PREALLOC) &&
+                       (info->si_code == SI_TIMER) &&
+                       (info->si_sys_private);
+
                __sigqueue_free(first);
        } else {
                /*
@@ -548,12 +555,12 @@ still_pending:
 }
 
 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
-                       siginfo_t *info)
+                       siginfo_t *info, bool *resched_timer)
 {
        int sig = next_signal(pending, mask);
 
        if (sig)
-               collect_signal(sig, pending, info);
+               collect_signal(sig, pending, info, resched_timer);
        return sig;
 }
 
@@ -565,15 +572,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
  */
 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
+       bool resched_timer = false;
        int signr;
 
        /* We only dequeue private signals from ourselves, we don't let
         * signalfd steal them
         */
-       signr = __dequeue_signal(&tsk->pending, mask, info);
+       signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
        if (!signr) {
                signr = __dequeue_signal(&tsk->signal->shared_pending,
-                                        mask, info);
+                                        mask, info, &resched_timer);
 #ifdef CONFIG_POSIX_TIMERS
                /*
                 * itimer signal ?
@@ -621,7 +629,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
                current->jobctl |= JOBCTL_STOP_DEQUEUED;
        }
 #ifdef CONFIG_POSIX_TIMERS
-       if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
+       if (resched_timer) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks.  Note, we leave
index ece4b177052baa8a2ba9a5aded7f9694594eb4e5..939a158eab11d2b2cca4f8412f0423449f964efd 100644 (file)
@@ -1119,7 +1119,7 @@ static ssize_t bin_uuid(struct file *file,
        /* Only supports reads */
        if (oldval && oldlen) {
                char buf[UUID_STRING_LEN + 1];
-               uuid_be uuid;
+               uuid_t uuid;
 
                result = kernel_read(file, 0, buf, sizeof(buf) - 1);
                if (result < 0)
@@ -1128,7 +1128,7 @@ static ssize_t bin_uuid(struct file *file,
                buf[result] = '\0';
 
                result = -EIO;
-               if (uuid_be_to_bin(buf, &uuid))
+               if (uuid_parse(buf, &uuid))
                        goto out;
 
                if (oldlen > 16)
index 9652bc57fd09811fa4e3ffbabc81b9139e75f125..b602c48cb84123890dbdc40b9a3da94439ba3a9d 100644 (file)
@@ -118,6 +118,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
        tk->offs_boot = ktime_add(tk->offs_boot, delta);
 }
 
+/*
+ * tk_clock_read - atomic clocksource read() helper
+ *
+ * This helper is necessary to use in the read paths because, while the
+ * seqlock ensures we don't return a bad value while structures are updated,
+ * it doesn't protect from potential crashes. There is the possibility that
+ * the tkr's clocksource may change between the read reference, and the
+ * clock reference passed to the read function.  This can cause crashes if
+ * the wrong clocksource is passed to the wrong read function.
+ * This isn't necessary to use when holding the timekeeper_lock or doing
+ * a read of the fast-timekeeper tkrs (which is protected by its own locking
+ * and update logic).
+ */
+static inline u64 tk_clock_read(struct tk_read_base *tkr)
+{
+       struct clocksource *clock = READ_ONCE(tkr->clock);
+
+       return clock->read(clock);
+}
+
 #ifdef CONFIG_DEBUG_TIMEKEEPING
 #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
 
@@ -175,7 +195,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
         */
        do {
                seq = read_seqcount_begin(&tk_core.seq);
-               now = tkr->read(tkr->clock);
+               now = tk_clock_read(tkr);
                last = tkr->cycle_last;
                mask = tkr->mask;
                max = tkr->clock->max_cycles;
@@ -209,7 +229,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
        u64 cycle_now, delta;
 
        /* read clocksource */
-       cycle_now = tkr->read(tkr->clock);
+       cycle_now = tk_clock_read(tkr);
 
        /* calculate the delta since the last update_wall_time */
        delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -238,12 +258,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
        ++tk->cs_was_changed_seq;
        old_clock = tk->tkr_mono.clock;
        tk->tkr_mono.clock = clock;
-       tk->tkr_mono.read = clock->read;
        tk->tkr_mono.mask = clock->mask;
-       tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+       tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
 
        tk->tkr_raw.clock = clock;
-       tk->tkr_raw.read = clock->read;
        tk->tkr_raw.mask = clock->mask;
        tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
 
@@ -262,7 +280,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
        /* Go back from cycles -> shifted ns */
        tk->xtime_interval = interval * clock->mult;
        tk->xtime_remainder = ntpinterval - tk->xtime_interval;
-       tk->raw_interval = (interval * clock->mult) >> clock->shift;
+       tk->raw_interval = interval * clock->mult;
 
         /* if changing clocks, convert xtime_nsec shift units */
        if (old_clock) {
@@ -404,7 +422,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 
                now += timekeeping_delta_to_ns(tkr,
                                clocksource_delta(
-                                       tkr->read(tkr->clock),
+                                       tk_clock_read(tkr),
                                        tkr->cycle_last,
                                        tkr->mask));
        } while (read_seqcount_retry(&tkf->seq, seq));
@@ -461,6 +479,10 @@ static u64 dummy_clock_read(struct clocksource *cs)
        return cycles_at_suspend;
 }
 
+static struct clocksource dummy_clock = {
+       .read = dummy_clock_read,
+};
+
 /**
  * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
  * @tk: Timekeeper to snapshot.
@@ -477,13 +499,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
        struct tk_read_base *tkr = &tk->tkr_mono;
 
        memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-       cycles_at_suspend = tkr->read(tkr->clock);
-       tkr_dummy.read = dummy_clock_read;
+       cycles_at_suspend = tk_clock_read(tkr);
+       tkr_dummy.clock = &dummy_clock;
        update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
 
        tkr = &tk->tkr_raw;
        memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-       tkr_dummy.read = dummy_clock_read;
+       tkr_dummy.clock = &dummy_clock;
        update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 }
 
@@ -649,11 +671,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-       struct clocksource *clock = tk->tkr_mono.clock;
        u64 cycle_now, delta;
        u64 nsec;
 
-       cycle_now = tk->tkr_mono.read(clock);
+       cycle_now = tk_clock_read(&tk->tkr_mono);
        delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
        tk->tkr_mono.cycle_last = cycle_now;
        tk->tkr_raw.cycle_last  = cycle_now;
@@ -929,8 +950,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
 
        do {
                seq = read_seqcount_begin(&tk_core.seq);
-
-               now = tk->tkr_mono.read(tk->tkr_mono.clock);
+               now = tk_clock_read(&tk->tkr_mono);
                systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
                systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
                base_real = ktime_add(tk->tkr_mono.base,
@@ -1108,7 +1128,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
                 * Check whether the system counter value provided by the
                 * device driver is on the current timekeeping interval.
                 */
-               now = tk->tkr_mono.read(tk->tkr_mono.clock);
+               now = tk_clock_read(&tk->tkr_mono);
                interval_start = tk->tkr_mono.cycle_last;
                if (!cycle_between(interval_start, cycles, now)) {
                        clock_was_set_seq = tk->clock_was_set_seq;
@@ -1629,7 +1649,7 @@ void timekeeping_resume(void)
         * The less preferred source will only be tried if there is no better
         * usable source. The rtc part is handled separately in rtc core code.
         */
-       cycle_now = tk->tkr_mono.read(clock);
+       cycle_now = tk_clock_read(&tk->tkr_mono);
        if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
                cycle_now > tk->tkr_mono.cycle_last) {
                u64 nsec, cyc_delta;
@@ -1976,7 +1996,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
                                    u32 shift, unsigned int *clock_set)
 {
        u64 interval = tk->cycle_interval << shift;
-       u64 raw_nsecs;
+       u64 snsec_per_sec;
 
        /* If the offset is smaller than a shifted interval, do nothing */
        if (offset < interval)
@@ -1991,14 +2011,15 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
        *clock_set |= accumulate_nsecs_to_secs(tk);
 
        /* Accumulate raw time */
-       raw_nsecs = (u64)tk->raw_interval << shift;
-       raw_nsecs += tk->raw_time.tv_nsec;
-       if (raw_nsecs >= NSEC_PER_SEC) {
-               u64 raw_secs = raw_nsecs;
-               raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-               tk->raw_time.tv_sec += raw_secs;
+       tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
+       tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+       snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+       while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+               tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+               tk->raw_time.tv_sec++;
        }
-       tk->raw_time.tv_nsec = raw_nsecs;
+       tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
+       tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
 
        /* Accumulate error between NTP and clock interval */
        tk->ntp_error += tk->ntp_tick << shift;
@@ -2030,7 +2051,7 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
        offset = real_tk->cycle_interval;
 #else
-       offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+       offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
                                   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 #endif
 
index 3c6432df7e63466a24d41dead807c7ef14c0ab86..4c0888c4a68d9621717f9012d3ca4cc0b72df20b 100644 (file)
  *     the values[M, M+1, ..., N] into the ints array in get_options.
  */
 
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
 {
        int x, inc_counter, upper_range;
 
        (*str)++;
        upper_range = simple_strtol((*str), NULL, 0);
        inc_counter = upper_range - *pint;
-       for (x = *pint; x < upper_range; x++)
+       for (x = *pint; n && x < upper_range; x++, n--)
                *pint++ = x;
        return inc_counter;
 }
@@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints)
                        break;
                if (res == 3) {
                        int range_nums;
-                       range_nums = get_range((char **)&str, ints + i);
+                       range_nums = get_range((char **)&str, ints + i, nints - i);
                        if (range_nums < 0)
                                break;
                        /*
index 547d3127a3cf06f98514e7e8f9ed9e3198c81663..478c049630b5cb8e09a57c5ccd012d51b5c4be97 100644 (file)
 
 struct test_uuid_data {
        const char *uuid;
-       uuid_le le;
-       uuid_be be;
+       guid_t le;
+       uuid_t be;
 };
 
 static const struct test_uuid_data test_uuid_test_data[] = {
        {
                .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
-               .le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
-               .be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+               .le = GUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+               .be = UUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
        },
        {
                .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
-               .le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
-               .be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+               .le = GUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+               .be = UUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
        },
        {
                .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
-               .le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
-               .be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+               .le = GUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+               .be = UUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
        },
 };
 
@@ -61,28 +61,28 @@ static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
 
 static void __init test_uuid_test(const struct test_uuid_data *data)
 {
-       uuid_le le;
-       uuid_be be;
+       guid_t le;
+       uuid_t be;
        char buf[48];
 
        /* LE */
        total_tests++;
-       if (uuid_le_to_bin(data->uuid, &le))
+       if (guid_parse(data->uuid, &le))
                test_uuid_failed("conversion", false, false, data->uuid, NULL);
 
        total_tests++;
-       if (uuid_le_cmp(data->le, le)) {
+       if (!guid_equal(&data->le, &le)) {
                sprintf(buf, "%pUl", &le);
                test_uuid_failed("cmp", false, false, data->uuid, buf);
        }
 
        /* BE */
        total_tests++;
-       if (uuid_be_to_bin(data->uuid, &be))
+       if (uuid_parse(data->uuid, &be))
                test_uuid_failed("conversion", false, true, data->uuid, NULL);
 
        total_tests++;
-       if (uuid_be_cmp(data->be, be)) {
+       if (uuid_equal(&data->be, &be)) {
                sprintf(buf, "%pUb", &be);
                test_uuid_failed("cmp", false, true, data->uuid, buf);
        }
@@ -90,17 +90,17 @@ static void __init test_uuid_test(const struct test_uuid_data *data)
 
 static void __init test_uuid_wrong(const char *data)
 {
-       uuid_le le;
-       uuid_be be;
+       guid_t le;
+       uuid_t be;
 
        /* LE */
        total_tests++;
-       if (!uuid_le_to_bin(data, &le))
+       if (!guid_parse(data, &le))
                test_uuid_failed("negative", true, false, data, NULL);
 
        /* BE */
        total_tests++;
-       if (!uuid_be_to_bin(data, &be))
+       if (!uuid_parse(data, &be))
                test_uuid_failed("negative", true, true, data, NULL);
 }
 
index 37687af77ff847aeb47f88c8ea5d9b6cda8ac5b3..680b9fb9ba098243a2b51f81fa65c20d7ae69bd8 100644 (file)
 #include <linux/uuid.h>
 #include <linux/random.h>
 
-const u8 uuid_le_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
-EXPORT_SYMBOL(uuid_le_index);
-const u8 uuid_be_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
-EXPORT_SYMBOL(uuid_be_index);
+const guid_t guid_null;
+EXPORT_SYMBOL(guid_null);
+const uuid_t uuid_null;
+EXPORT_SYMBOL(uuid_null);
+
+const u8 guid_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
+const u8 uuid_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
 
 /***************************************************************
  * Random UUID interface
@@ -53,21 +56,21 @@ static void __uuid_gen_common(__u8 b[16])
        b[8] = (b[8] & 0x3F) | 0x80;
 }
 
-void uuid_le_gen(uuid_le *lu)
+void guid_gen(guid_t *lu)
 {
        __uuid_gen_common(lu->b);
        /* version 4 : random generation */
        lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
 }
-EXPORT_SYMBOL_GPL(uuid_le_gen);
+EXPORT_SYMBOL_GPL(guid_gen);
 
-void uuid_be_gen(uuid_be *bu)
+void uuid_gen(uuid_t *bu)
 {
        __uuid_gen_common(bu->b);
        /* version 4 : random generation */
        bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
 }
-EXPORT_SYMBOL_GPL(uuid_be_gen);
+EXPORT_SYMBOL_GPL(uuid_gen);
 
 /**
   * uuid_is_valid - checks if UUID string valid
@@ -97,7 +100,7 @@ bool uuid_is_valid(const char *uuid)
 }
 EXPORT_SYMBOL(uuid_is_valid);
 
-static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
+static int __uuid_parse(const char *uuid, __u8 b[16], const u8 ei[16])
 {
        static const u8 si[16] = {0,2,4,6,9,11,14,16,19,21,24,26,28,30,32,34};
        unsigned int i;
@@ -115,14 +118,14 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
        return 0;
 }
 
-int uuid_le_to_bin(const char *uuid, uuid_le *u)
+int guid_parse(const char *uuid, guid_t *u)
 {
-       return __uuid_to_bin(uuid, u->b, uuid_le_index);
+       return __uuid_parse(uuid, u->b, guid_index);
 }
-EXPORT_SYMBOL(uuid_le_to_bin);
+EXPORT_SYMBOL(guid_parse);
 
-int uuid_be_to_bin(const char *uuid, uuid_be *u)
+int uuid_parse(const char *uuid, uuid_t *u)
 {
-       return __uuid_to_bin(uuid, u->b, uuid_be_index);
+       return __uuid_parse(uuid, u->b, uuid_index);
 }
-EXPORT_SYMBOL(uuid_be_to_bin);
+EXPORT_SYMBOL(uuid_parse);
index 2d41de3f98a1c9a0e0883b3d73b6980b0110cff1..9f37d6208e99fdcfa768319772935b92124dd9e0 100644 (file)
@@ -1308,14 +1308,14 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
        char uuid[UUID_STRING_LEN + 1];
        char *p = uuid;
        int i;
-       const u8 *index = uuid_be_index;
+       const u8 *index = uuid_index;
        bool uc = false;
 
        switch (*(++fmt)) {
        case 'L':
                uc = true;              /* fall-through */
        case 'l':
-               index = uuid_le_index;
+               index = guid_index;
                break;
        case 'B':
                uc = true;
index ba5d8f3e6d68a3769589c372adc3183b4addfb40..f7b9fdc79d97c15cc6790566469b6614c686aa90 100644 (file)
@@ -130,7 +130,7 @@ void __cleancache_init_shared_fs(struct super_block *sb)
        int pool_id = CLEANCACHE_NO_BACKEND_SHARED;
 
        if (cleancache_ops) {
-               pool_id = cleancache_ops->init_shared_fs(sb->s_uuid, PAGE_SIZE);
+               pool_id = cleancache_ops->init_shared_fs(&sb->s_uuid, PAGE_SIZE);
                if (pool_id < 0)
                        pool_id = CLEANCACHE_NO_POOL;
        }
index 945fd1ca49b5af0bc3b87dbfe8098f0f602775a2..df4ebdb2b10a373723330dc0124957cd2cb1c021 100644 (file)
@@ -652,7 +652,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                        spin_unlock(ptl);
                        free_page_and_swap_cache(src_page);
                }
-               cond_resched();
        }
 }
 
index 8e07976d5e47727273e9b469992f76ab0c309d58..a5e3dcd75e79f40557cefc1bd5338d34eb4fa2f2 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1817,7 +1817,8 @@ check_current:
                /* Check if current node has a suitable gap */
                if (gap_start > high_limit)
                        return -ENOMEM;
-               if (gap_end >= low_limit && gap_end - gap_start >= length)
+               if (gap_end >= low_limit &&
+                   gap_end > gap_start && gap_end - gap_start >= length)
                        goto found;
 
                /* Visit right subtree if it looks promising */
@@ -1920,7 +1921,8 @@ check_current:
                gap_end = vm_start_gap(vma);
                if (gap_end < low_limit)
                        return -ENOMEM;
-               if (gap_start <= high_limit && gap_end - gap_start >= length)
+               if (gap_start <= high_limit &&
+                   gap_end > gap_start && gap_end - gap_start >= length)
                        goto found;
 
                /* Visit left subtree if it looks promising */
@@ -2228,16 +2230,19 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
 
-       /* Guard against wrapping around to address 0. */
+       /* Guard against exceeding limits of the address space. */
        address &= PAGE_MASK;
-       address += PAGE_SIZE;
-       if (!address)
+       if (address >= TASK_SIZE)
                return -ENOMEM;
+       address += PAGE_SIZE;
 
        /* Enforce stack_guard_gap */
        gap_addr = address + stack_guard_gap;
-       if (gap_addr < address)
-               return -ENOMEM;
+
+       /* Guard against overflow */
+       if (gap_addr < address || gap_addr > TASK_SIZE)
+               gap_addr = TASK_SIZE;
+
        next = vma->vm_next;
        if (next && next->vm_start < gap_addr) {
                if (!(next->vm_flags & VM_GROWSUP))
index e67d6ba4e98e73210c8046e82612180fca220e89..391f2dcca72782051cf2dfc5b71b7af11c73f2c9 100644 (file)
@@ -75,6 +75,7 @@ static struct vfsmount *shm_mnt;
 #include <uapi/linux/memfd.h>
 #include <linux/userfaultfd_k.h>
 #include <linux/rmap.h>
+#include <linux/uuid.h>
 
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
@@ -3761,6 +3762,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
 #ifdef CONFIG_TMPFS_POSIX_ACL
        sb->s_flags |= MS_POSIXACL;
 #endif
+       uuid_gen(&sb->s_uuid);
 
        inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
        if (!inode)
index 7449593fca724147cef5b8f7a46752333e5e0585..8addc535bcdc58794fe40e72a729e4589d44d2b6 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5625,6 +5625,28 @@ static char *create_unique_id(struct kmem_cache *s)
        return name;
 }
 
+static void sysfs_slab_remove_workfn(struct work_struct *work)
+{
+       struct kmem_cache *s =
+               container_of(work, struct kmem_cache, kobj_remove_work);
+
+       if (!s->kobj.state_in_sysfs)
+               /*
+                * For a memcg cache, this may be called during
+                * deactivation and again on shutdown.  Remove only once.
+                * A cache is never shut down before deactivation is
+                * complete, so no need to worry about synchronization.
+                */
+               return;
+
+#ifdef CONFIG_MEMCG
+       kset_unregister(s->memcg_kset);
+#endif
+       kobject_uevent(&s->kobj, KOBJ_REMOVE);
+       kobject_del(&s->kobj);
+       kobject_put(&s->kobj);
+}
+
 static int sysfs_slab_add(struct kmem_cache *s)
 {
        int err;
@@ -5632,6 +5654,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
        struct kset *kset = cache_kset(s);
        int unmergeable = slab_unmergeable(s);
 
+       INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
+
        if (!kset) {
                kobject_init(&s->kobj, &slab_ktype);
                return 0;
@@ -5695,20 +5719,8 @@ static void sysfs_slab_remove(struct kmem_cache *s)
                 */
                return;
 
-       if (!s->kobj.state_in_sysfs)
-               /*
-                * For a memcg cache, this may be called during
-                * deactivation and again on shutdown.  Remove only once.
-                * A cache is never shut down before deactivation is
-                * complete, so no need to worry about synchronization.
-                */
-               return;
-
-#ifdef CONFIG_MEMCG
-       kset_unregister(s->memcg_kset);
-#endif
-       kobject_uevent(&s->kobj, KOBJ_REMOVE);
-       kobject_del(&s->kobj);
+       kobject_get(&s->kobj);
+       schedule_work(&s->kobj_remove_work);
 }
 
 void sysfs_slab_release(struct kmem_cache *s)
index 34a1c3e46ed72594b499e7f61e8aacdd4c5fe818..ecc97f74ab182fe9aeb7d7eda5166dfdb5b03095 100644 (file)
@@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
        if (p4d_none(*p4d))
                return NULL;
        pud = pud_offset(p4d, addr);
-       if (pud_none(*pud))
+
+       /*
+        * Don't dereference bad PUD or PMD (below) entries. This will also
+        * identify huge mappings, which we may encounter on architectures
+        * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
+        * identified as vmalloc addresses by is_vmalloc_addr(), but are
+        * not [unambiguously] associated with a struct page, so there is
+        * no correct value to return for them.
+        */
+       WARN_ON_ONCE(pud_bad(*pud));
+       if (pud_none(*pud) || pud_bad(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd))
+       WARN_ON_ONCE(pmd_bad(*pmd));
+       if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
 
        ptep = pte_offset_map(pmd, addr);
index 467069b73ce1b89e5a7b72904a878ef1f3412c67..9649579b5b9f38aff6ce7a990d2dc1ddb1d85e12 100644 (file)
@@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
        return 0;
 
 out_free_newdev:
-       free_netdev(new_dev);
+       if (new_dev->reg_state == NETREG_UNINITIALIZED)
+               free_netdev(new_dev);
        return err;
 }
 
index 6d60149287a1868cd65fcc55a4e29edd7611def3..7243421c9783bf060dac1938fc1f2f59a4afafea 100644 (file)
@@ -5206,8 +5206,6 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
        if (rc == BUSY_POLL_BUDGET)
                __napi_schedule(napi);
        local_bh_enable();
-       if (local_softirq_pending())
-               do_softirq();
 }
 
 void napi_busy_loop(unsigned int napi_id,
index b94b1d29350603e19c3db8e6fd740d3f89440771..27fad31784a83861f942f3b4f82f44985a946645 100644 (file)
@@ -410,6 +410,22 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
        if (cmd == SIOCGIFNAME)
                return dev_ifname(net, (struct ifreq __user *)arg);
 
+       /*
+        * Take care of Wireless Extensions. Unfortunately struct iwreq
+        * isn't a proper subset of struct ifreq (it's 8 byte shorter)
+        * so we need to treat it specially, otherwise applications may
+        * fault if the struct they're passing happens to land at the
+        * end of a mapped page.
+        */
+       if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
+               struct iwreq iwr;
+
+               if (copy_from_user(&iwr, arg, sizeof(iwr)))
+                       return -EFAULT;
+
+               return wext_handle_ioctl(net, &iwr, cmd, arg);
+       }
+
        if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
                return -EFAULT;
 
@@ -559,9 +575,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
                                ret = -EFAULT;
                        return ret;
                }
-               /* Take care of Wireless Extensions */
-               if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
-                       return wext_handle_ioctl(net, &ifr, cmd, arg);
                return -ENOTTY;
        }
 }
index f21c4d3aeae0cf59a8d9239cf1e581e6d136c740..3bba291c6c32e4359a6d626fbd492f7d07fd3e4c 100644 (file)
@@ -568,7 +568,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct net *net = sock_net(skb->sk);
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
-       struct fib_rule *rule, *tmp;
+       struct fib_rule *rule, *r;
        struct nlattr *tb[FRA_MAX+1];
        struct fib_kuid_range range;
        int err = -EINVAL;
@@ -668,16 +668,23 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 
                /*
                 * Check if this rule is a target to any of them. If so,
+                * adjust to the next one with the same preference or
                 * disable them. As this operation is eventually very
-                * expensive, it is only performed if goto rules have
-                * actually been added.
+                * expensive, it is only performed if goto rules, except
+                * current if it is goto rule, have actually been added.
                 */
                if (ops->nr_goto_rules > 0) {
-                       list_for_each_entry(tmp, &ops->rules_list, list) {
-                               if (rtnl_dereference(tmp->ctarget) == rule) {
-                                       RCU_INIT_POINTER(tmp->ctarget, NULL);
+                       struct fib_rule *n;
+
+                       n = list_next_entry(rule, list);
+                       if (&n->list == &ops->rules_list || n->pref != rule->pref)
+                               n = NULL;
+                       list_for_each_entry(r, &ops->rules_list, list) {
+                               if (rtnl_dereference(r->ctarget) != rule)
+                                       continue;
+                               rcu_assign_pointer(r->ctarget, n);
+                               if (!n)
                                        ops->unresolved_rules++;
-                               }
                        }
                }
 
index 5e61456f6bc795cfb75db2d530eb3b1872c82989..467a2f4510a74cad48209b696532838d1edcf96e 100644 (file)
@@ -931,6 +931,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + nla_total_size(1) /* IFLA_LINKMODE */
               + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
               + nla_total_size(4) /* IFLA_LINK_NETNSID */
+              + nla_total_size(4) /* IFLA_GROUP */
               + nla_total_size(ext_filter_mask
                                & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
               + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1468,6 +1469,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_LINK_NETNSID]     = { .type = NLA_S32 },
        [IFLA_PROTO_DOWN]       = { .type = NLA_U8 },
        [IFLA_XDP]              = { .type = NLA_NESTED },
+       [IFLA_GROUP]            = { .type = NLA_U32 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
index 4b9518a0d2489494ed88b6808f95803cf1b543ad..6f95612b4d321d63e39b37f881f0e46331999ad2 100644 (file)
@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
        call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
-static inline void dnrt_drop(struct dn_route *rt)
-{
-       dst_release(&rt->dst);
-       call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
 static void dn_dst_check_expire(unsigned long dummy)
 {
        int i;
@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
                        }
                        *rtp = rt->dst.dn_next;
                        rt->dst.dn_next = NULL;
-                       dnrt_drop(rt);
+                       dnrt_free(rt);
                        break;
                }
                spin_unlock_bh(&dn_rt_hash_table[i].lock);
@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
                        dst_use(&rth->dst, now);
                        spin_unlock_bh(&dn_rt_hash_table[hash].lock);
 
-                       dnrt_drop(rt);
+                       dst_free(&rt->dst);
                        *rp = rth;
                        return 0;
                }
@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
                for(; rt; rt = next) {
                        next = rcu_dereference_raw(rt->dst.dn_next);
                        RCU_INIT_POINTER(rt->dst.dn_next, NULL);
-                       dst_free((struct dst_entry *)rt);
+                       dnrt_free(rt);
                }
 
 nothing_to_declare:
@@ -1187,7 +1181,7 @@ make_route:
        if (dev_out->flags & IFF_LOOPBACK)
                flags |= RTCF_LOCAL;
 
-       rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
+       rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
        if (rt == NULL)
                goto e_nobufs;
 
index 8f6b5bbcbf69f54f354d3678cf6e5f8374602edb..ec9a396fa4660272f96602e78c5c3b0603068d66 100644 (file)
@@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
        pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
        if (!pmc)
                return;
+       spin_lock_init(&pmc->lock);
        spin_lock_bh(&im->lock);
        pmc->interface = im->interface;
        in_dev_hold(in_dev);
index b436d077563174c22b48a81a6a856f30dd831a5e..129d1a3616f838c9b487cf2392d1d9eb09dcfa5a 100644 (file)
@@ -446,6 +446,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
        return 0;
 
 drop:
+       if (tun_dst)
+               dst_release((struct dst_entry *)tun_dst);
        kfree_skb(skb);
        return 0;
 }
index 6a4fb1e629fb7609048156974ae2eb322cebddae..686c92375e81d50787adbfb423afba76903948ad 100644 (file)
@@ -332,9 +332,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
                                   unsigned long delay)
 {
-       if (!delayed_work_pending(&ifp->dad_work))
-               in6_ifa_hold(ifp);
-       mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
+       in6_ifa_hold(ifp);
+       if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
+               in6_ifa_put(ifp);
 }
 
 static int snmp6_alloc_dev(struct inet6_dev *idev)
index eea23b57c6a5a000aec234cc9bf6f9411d98001f..ec849d88a66205742b1a58c4959c08eeffc3f6d7 100644 (file)
@@ -32,7 +32,6 @@ struct fib6_rule {
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags, pol_lookup_t lookup)
 {
-       struct rt6_info *rt;
        struct fib_lookup_arg arg = {
                .lookup_ptr = lookup,
                .flags = FIB_LOOKUP_NOREF,
@@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
        fib_rules_lookup(net->ipv6.fib6_rules_ops,
                         flowi6_to_flowi(fl6), flags, &arg);
 
-       rt = arg.result;
+       if (arg.result)
+               return arg.result;
 
-       if (!rt) {
-               dst_hold(&net->ipv6.ip6_null_entry->dst);
-               return &net->ipv6.ip6_null_entry->dst;
-       }
-
-       if (rt->rt6i_flags & RTF_REJECT &&
-           rt->dst.error == -EAGAIN) {
-               ip6_rt_put(rt);
-               rt = net->ipv6.ip6_null_entry;
-               dst_hold(&rt->dst);
-       }
-
-       return &rt->dst;
+       dst_hold(&net->ipv6.ip6_null_entry->dst);
+       return &net->ipv6.ip6_null_entry->dst;
 }
 
 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
                        flp6->saddr = saddr;
                }
                err = rt->dst.error;
-               goto out;
+               if (err != -EAGAIN)
+                       goto out;
        }
 again:
        ip6_rt_put(rt);
index d4bf2c68a545b44873e433930e4e999920de78c9..e6b78ba0e6360ea40be58f07ea1c6657efcca93e 100644 (file)
@@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
        struct rt6_info *rt;
 
        rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
-       if (rt->rt6i_flags & RTF_REJECT &&
-           rt->dst.error == -EAGAIN) {
+       if (rt->dst.error == -EAGAIN) {
                ip6_rt_put(rt);
                rt = net->ipv6.ip6_null_entry;
                dst_hold(&rt->dst);
index c3581973f5d7265a574ae69416a516526ed64e44..8c6c3c8e7eef26899ea22ff85997230c8fd17e7e 100644 (file)
@@ -858,6 +858,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
        return 0;
 
 drop:
+       if (tun_dst)
+               dst_release((struct dst_entry *)tun_dst);
        kfree_skb(skb);
        return 0;
 }
@@ -1246,7 +1248,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowi6_proto = IPPROTO_IPIP;
                fl6.daddr = key->u.ipv6.dst;
                fl6.flowlabel = key->label;
-               dsfield = ip6_tclass(key->label);
+               dsfield =  key->tos;
        } else {
                if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                        encap_limit = t->parms.encap_limit;
@@ -1317,7 +1319,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowi6_proto = IPPROTO_IPV6;
                fl6.daddr = key->u.ipv6.dst;
                fl6.flowlabel = key->label;
-               dsfield = ip6_tclass(key->label);
+               dsfield = key->tos;
        } else {
                offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
                /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
index 0a4e28477ad94012b4e7aeb526711076e6259995..54369225766ef8e43ff94f21b4fd670b2cc233a3 100644 (file)
@@ -217,7 +217,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
                                       unsigned int *_toklen)
 {
        const __be32 *xdr = *_xdr;
-       unsigned int toklen = *_toklen, n_parts, loop, tmp;
+       unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
 
        /* there must be at least one name, and at least #names+1 length
         * words */
@@ -247,16 +247,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
                toklen -= 4;
                if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
                        return -EINVAL;
-               if (tmp > toklen)
+               paddedlen = (tmp + 3) & ~3;
+               if (paddedlen > toklen)
                        return -EINVAL;
                princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
                if (!princ->name_parts[loop])
                        return -ENOMEM;
                memcpy(princ->name_parts[loop], xdr, tmp);
                princ->name_parts[loop][tmp] = 0;
-               tmp = (tmp + 3) & ~3;
-               toklen -= tmp;
-               xdr += tmp >> 2;
+               toklen -= paddedlen;
+               xdr += paddedlen >> 2;
        }
 
        if (toklen < 4)
@@ -265,16 +265,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
        toklen -= 4;
        if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
                return -EINVAL;
-       if (tmp > toklen)
+       paddedlen = (tmp + 3) & ~3;
+       if (paddedlen > toklen)
                return -EINVAL;
        princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
        if (!princ->realm)
                return -ENOMEM;
        memcpy(princ->realm, xdr, tmp);
        princ->realm[tmp] = 0;
-       tmp = (tmp + 3) & ~3;
-       toklen -= tmp;
-       xdr += tmp >> 2;
+       toklen -= paddedlen;
+       xdr += paddedlen >> 2;
 
        _debug("%s/...@%s", princ->name_parts[0], princ->realm);
 
@@ -293,7 +293,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
                                         unsigned int *_toklen)
 {
        const __be32 *xdr = *_xdr;
-       unsigned int toklen = *_toklen, len;
+       unsigned int toklen = *_toklen, len, paddedlen;
 
        /* there must be at least one tag and one length word */
        if (toklen <= 8)
@@ -307,15 +307,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
        toklen -= 8;
        if (len > max_data_size)
                return -EINVAL;
+       paddedlen = (len + 3) & ~3;
+       if (paddedlen > toklen)
+               return -EINVAL;
        td->data_len = len;
 
        if (len > 0) {
                td->data = kmemdup(xdr, len, GFP_KERNEL);
                if (!td->data)
                        return -ENOMEM;
-               len = (len + 3) & ~3;
-               toklen -= len;
-               xdr += len >> 2;
+               toklen -= paddedlen;
+               xdr += paddedlen >> 2;
        }
 
        _debug("tag %x len %x", td->tag, td->data_len);
@@ -387,7 +389,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
                                    const __be32 **_xdr, unsigned int *_toklen)
 {
        const __be32 *xdr = *_xdr;
-       unsigned int toklen = *_toklen, len;
+       unsigned int toklen = *_toklen, len, paddedlen;
 
        /* there must be at least one length word */
        if (toklen <= 4)
@@ -399,6 +401,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
        toklen -= 4;
        if (len > AFSTOKEN_K5_TIX_MAX)
                return -EINVAL;
+       paddedlen = (len + 3) & ~3;
+       if (paddedlen > toklen)
+               return -EINVAL;
        *_tktlen = len;
 
        _debug("ticket len %u", len);
@@ -407,9 +412,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
                *_ticket = kmemdup(xdr, len, GFP_KERNEL);
                if (!*_ticket)
                        return -ENOMEM;
-               len = (len + 3) & ~3;
-               toklen -= len;
-               xdr += len >> 2;
+               toklen -= paddedlen;
+               xdr += paddedlen >> 2;
        }
 
        *_xdr = xdr;
@@ -552,7 +556,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
 {
        const __be32 *xdr = prep->data, *token;
        const char *cp;
-       unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
+       unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
        size_t datalen = prep->datalen;
        int ret;
 
@@ -578,22 +582,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
        if (len < 1 || len > AFSTOKEN_CELL_MAX)
                goto not_xdr;
        datalen -= 4;
-       tmp = (len + 3) & ~3;
-       if (tmp > datalen)
+       paddedlen = (len + 3) & ~3;
+       if (paddedlen > datalen)
                goto not_xdr;
 
        cp = (const char *) xdr;
        for (loop = 0; loop < len; loop++)
                if (!isprint(cp[loop]))
                        goto not_xdr;
-       if (len < tmp)
-               for (; loop < tmp; loop++)
-                       if (cp[loop])
-                               goto not_xdr;
+       for (; loop < paddedlen; loop++)
+               if (cp[loop])
+                       goto not_xdr;
        _debug("cellname: [%u/%u] '%*.*s'",
-              len, tmp, len, len, (const char *) xdr);
-       datalen -= tmp;
-       xdr += tmp >> 2;
+              len, paddedlen, len, len, (const char *) xdr);
+       datalen -= paddedlen;
+       xdr += paddedlen >> 2;
 
        /* get the token count */
        if (datalen < 12)
@@ -614,10 +617,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
                sec_ix = ntohl(*xdr);
                datalen -= 4;
                _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
-               if (toklen < 20 || toklen > datalen)
+               paddedlen = (toklen + 3) & ~3;
+               if (toklen < 20 || toklen > datalen || paddedlen > datalen)
                        goto not_xdr;
-               datalen -= (toklen + 3) & ~3;
-               xdr += (toklen + 3) >> 2;
+               datalen -= paddedlen;
+               xdr += paddedlen >> 2;
 
        } while (--loop > 0);
 
index 8c589230794f9394406d2a0ad2157b7a47d75757..3dcd0ecf3d99f74ec8ed4aad149bd32950ab23ef 100644 (file)
@@ -275,6 +275,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
                if (sctp_sk(sk)->bind_hash)
                        sctp_put_port(sk);
 
+               sctp_sk(sk)->ep = NULL;
                sock_put(sk);
        }
 
index 048954eee984f28e599084b32fad87b2bd6989d0..9a647214a91ebc583660db320307e0df1e13e5be 100644 (file)
@@ -278,7 +278,6 @@ out:
 
 static int sctp_sock_dump(struct sock *sk, void *p)
 {
-       struct sctp_endpoint *ep = sctp_sk(sk)->ep;
        struct sctp_comm_param *commp = p;
        struct sk_buff *skb = commp->skb;
        struct netlink_callback *cb = commp->cb;
@@ -287,7 +286,9 @@ static int sctp_sock_dump(struct sock *sk, void *p)
        int err = 0;
 
        lock_sock(sk);
-       list_for_each_entry(assoc, &ep->asocs, asocs) {
+       if (!sctp_sk(sk)->ep)
+               goto release;
+       list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) {
                if (cb->args[4] < cb->args[1])
                        goto next;
 
index 30aa0a529215ae54e43bdcb54a6e1870761996c3..3a8318e518f1c10a375dc3ff6ba6596579bb85a1 100644 (file)
@@ -4666,9 +4666,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
        if (err)
                return err;
 
-       sctp_transport_get_idx(net, &hti, pos);
-       obj = sctp_transport_get_next(net, &hti);
-       for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) {
+       obj = sctp_transport_get_idx(net, &hti, pos + 1);
+       for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
                struct sctp_transport *transport = obj;
 
                if (!sctp_transport_hold(transport))
index 1a4db6790e2077d4ea922d1c32cde5352543fa2c..6cdb054484d66d40e4523965457a619bd1c9154f 100644 (file)
@@ -914,13 +914,12 @@ int call_commit_handler(struct net_device *dev)
  * Main IOCTl dispatcher.
  * Check the type of IOCTL and call the appropriate wrapper...
  */
-static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
+static int wireless_process_ioctl(struct net *net, struct iwreq *iwr,
                                  unsigned int cmd,
                                  struct iw_request_info *info,
                                  wext_ioctl_func standard,
                                  wext_ioctl_func private)
 {
-       struct iwreq *iwr = (struct iwreq *) ifr;
        struct net_device *dev;
        iw_handler      handler;
 
@@ -928,7 +927,7 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
         * The copy_to/from_user() of ifr is also dealt with in there */
 
        /* Make sure the device exist */
-       if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL)
+       if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL)
                return -ENODEV;
 
        /* A bunch of special cases, then the generic case...
@@ -957,9 +956,6 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
                else if (private)
                        return private(dev, iwr, cmd, info, handler);
        }
-       /* Old driver API : call driver ioctl handler */
-       if (dev->netdev_ops->ndo_do_ioctl)
-               return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
        return -EOPNOTSUPP;
 }
 
@@ -977,7 +973,7 @@ static int wext_permission_check(unsigned int cmd)
 }
 
 /* entry point from dev ioctl */
-static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
+static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr,
                               unsigned int cmd, struct iw_request_info *info,
                               wext_ioctl_func standard,
                               wext_ioctl_func private)
@@ -987,9 +983,9 @@ static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
        if (ret)
                return ret;
 
-       dev_load(net, ifr->ifr_name);
+       dev_load(net, iwr->ifr_name);
        rtnl_lock();
-       ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private);
+       ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private);
        rtnl_unlock();
 
        return ret;
@@ -1039,18 +1035,18 @@ static int ioctl_standard_call(struct net_device *      dev,
 }
 
 
-int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
                      void __user *arg)
 {
        struct iw_request_info info = { .cmd = cmd, .flags = 0 };
        int ret;
 
-       ret = wext_ioctl_dispatch(net, ifr, cmd, &info,
+       ret = wext_ioctl_dispatch(net, iwr, cmd, &info,
                                  ioctl_standard_call,
                                  ioctl_private_call);
        if (ret >= 0 &&
            IW_IS_GET(cmd) &&
-           copy_to_user(arg, ifr, sizeof(struct iwreq)))
+           copy_to_user(arg, iwr, sizeof(struct iwreq)))
                return -EFAULT;
 
        return ret;
@@ -1107,7 +1103,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
        info.cmd = cmd;
        info.flags = IW_REQUEST_FLAG_COMPAT;
 
-       ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info,
+       ret = wext_ioctl_dispatch(net, &iwr, cmd, &info,
                                  compat_standard_call,
                                  compat_private_call);
 
index ce753a408c56823dbd1c4b5d4fb5cfe88e7054c4..c583a1e1bd3c16356cf67a772a3e680adec9681a 100644 (file)
@@ -14,7 +14,15 @@ __headers:
 include scripts/Kbuild.include
 
 srcdir        := $(srctree)/$(obj)
-subdirs       := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.))
+
+# When make is run under a fakechroot environment, the function
+# $(wildcard $(srcdir)/*/.) doesn't only return directories, but also regular
+# files. So, we are using a combination of sort/dir/wildcard which works
+# with fakechroot.
+subdirs       := $(patsubst $(srcdir)/%/,%,\
+                $(filter-out $(srcdir)/,\
+                $(sort $(dir $(wildcard $(srcdir)/*/)))))
+
 # caller may set destination dir (when installing to asm/)
 _dst          := $(if $(dst),$(dst),$(obj))
 
index 3bffdcaaa274e82271a98c65fa0bc43cef53ffe2..b724a0290c75e45cbc89134f2b9ae03166ff27a5 100644 (file)
@@ -75,7 +75,7 @@ struct string_list *copy_list_range(struct string_list *start,
 int yylex(void);
 int yyparse(void);
 
-void error_with_pos(const char *, ...);
+void error_with_pos(const char *, ...) __attribute__ ((format(printf, 1, 2)));
 
 /*----------------------------------------------------------------------*/
 #define xmalloc(size) ({ void *__ptr = malloc(size);           \
index 90a091b6ae4de74e6c070d77255b9f4b5655629a..eb8144643b78355cea07f301531333001cd1d608 100644 (file)
@@ -196,7 +196,7 @@ clean-files     += config.pot linux.pot
 
 # Check that we have the required ncurses stuff installed for lxdialog (menuconfig)
 PHONY += $(obj)/dochecklxdialog
-$(addprefix $(obj)/,$(lxdialog)): $(obj)/dochecklxdialog
+$(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/dochecklxdialog
 $(obj)/dochecklxdialog:
        $(Q)$(CONFIG_SHELL) $(check-lxdialog) -check $(HOSTCC) $(HOST_EXTRACFLAGS) $(HOSTLOADLIBES_mconf)
 
index a9bc5334a478d6774d1409a837665b4f143d8597..0031147798153bdd06aa5c1f3d6f2015298b5b5d 100644 (file)
@@ -271,7 +271,7 @@ static struct mitem k_menu_items[MAX_MENU_ITEMS];
 static int items_num;
 static int global_exit;
 /* the currently selected button */
-const char *current_instructions = menu_instructions;
+static const char *current_instructions = menu_instructions;
 
 static char *dialog_input_result;
 static int dialog_input_result_len;
@@ -305,7 +305,7 @@ struct function_keys {
 };
 
 static const int function_keys_num = 9;
-struct function_keys function_keys[] = {
+static struct function_keys function_keys[] = {
        {
                .key_str = "F1",
                .func = "Help",
@@ -508,7 +508,7 @@ static int get_mext_match(const char *match_str, match_f flag)
        index = (index + items_num) % items_num;
        while (true) {
                char *str = k_menu_items[index].str;
-               if (strcasestr(str, match_str) != 0)
+               if (strcasestr(str, match_str) != NULL)
                        return index;
                if (flag == FIND_NEXT_MATCH_UP ||
                    flag == MATCH_TINKER_PATTERN_UP)
@@ -1067,7 +1067,7 @@ static int do_match(int key, struct match_state *state, int *ans)
 
 static void conf(struct menu *menu)
 {
-       struct menu *submenu = 0;
+       struct menu *submenu = NULL;
        const char *prompt = menu_get_prompt(menu);
        struct symbol *sym;
        int res;
@@ -1234,7 +1234,7 @@ static void show_help(struct menu *menu)
 static void conf_choice(struct menu *menu)
 {
        const char *prompt = _(menu_get_prompt(menu));
-       struct menu *child = 0;
+       struct menu *child = NULL;
        struct symbol *active;
        int selected_index = 0;
        int last_top_row = 0;
@@ -1456,7 +1456,7 @@ static void conf_save(void)
        }
 }
 
-void setup_windows(void)
+static void setup_windows(void)
 {
        int lines, columns;
 
index 4b2f44c20caf8941d150f261074b40d589a10376..a64b1c31253e13b918fefe509e1cccf5b4ac248d 100644 (file)
@@ -129,7 +129,7 @@ static void no_colors_theme(void)
        mkattrn(FUNCTION_TEXT, A_REVERSE);
 }
 
-void set_colors()
+void set_colors(void)
 {
        start_color();
        use_default_colors();
@@ -192,7 +192,7 @@ const char *get_line(const char *text, int line_no)
        int lines = 0;
 
        if (!text)
-               return 0;
+               return NULL;
 
        for (i = 0; text[i] != '\0' && lines < line_no; i++)
                if (text[i] == '\n')
index d661f2f3ef614c41e22f2346ee9b41b238266715..d23dcbf17457c2c16cf56c73a1ffb3d07e03a257 100755 (executable)
@@ -106,6 +106,7 @@ all_compiled_sources()
                case "$i" in
                        *.[cS])
                                j=${i/\.[cS]/\.o}
+                               j="${j#$tree}"
                                if [ -e $j ]; then
                                        echo $i
                                fi
index d7f282d75cc16efa1d21274e42145280a2f69468..1d32cd20009a3bd35cf77bc11027354c00eb8812 100644 (file)
@@ -164,7 +164,7 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
        hmac_misc.mode = inode->i_mode;
        crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
        if (evm_hmac_attrs & EVM_ATTR_FSUUID)
-               crypto_shash_update(desc, inode->i_sb->s_uuid,
+               crypto_shash_update(desc, &inode->i_sb->s_uuid.b[0],
                                    sizeof(inode->i_sb->s_uuid));
        crypto_shash_final(desc, digest);
 }
index 3ab1067db624d860f9303a5e3ea8587ebbed699b..6f885fab9d84a1476eacaf5d1905b4546d964fd6 100644 (file)
@@ -61,7 +61,7 @@ struct ima_rule_entry {
        enum ima_hooks func;
        int mask;
        unsigned long fsmagic;
-       u8 fsuuid[16];
+       uuid_t fsuuid;
        kuid_t uid;
        kuid_t fowner;
        bool (*uid_op)(kuid_t, kuid_t);    /* Handlers for operators       */
@@ -244,7 +244,7 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
            && rule->fsmagic != inode->i_sb->s_magic)
                return false;
        if ((rule->flags & IMA_FSUUID) &&
-           memcmp(rule->fsuuid, inode->i_sb->s_uuid, sizeof(rule->fsuuid)))
+           !uuid_equal(&rule->fsuuid, &inode->i_sb->s_uuid))
                return false;
        if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
                return false;
@@ -711,14 +711,12 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
                case Opt_fsuuid:
                        ima_log_string(ab, "fsuuid", args[0].from);
 
-                       if (memchr_inv(entry->fsuuid, 0x00,
-                                      sizeof(entry->fsuuid))) {
+                       if (uuid_is_null(&entry->fsuuid)) {
                                result = -EINVAL;
                                break;
                        }
 
-                       result = blk_part_pack_uuid(args[0].from,
-                                                   entry->fsuuid);
+                       result = uuid_parse(args[0].from, &entry->fsuuid);
                        if (!result)
                                entry->flags |= IMA_FSUUID;
                        break;
@@ -1087,7 +1085,7 @@ int ima_policy_show(struct seq_file *m, void *v)
        }
 
        if (entry->flags & IMA_FSUUID) {
-               seq_printf(m, "fsuuid=%pU", entry->fsuuid);
+               seq_printf(m, "fsuuid=%pU", &entry->fsuuid);
                seq_puts(m, " ");
        }
 
index 5088d4b8db2222e28a71baaa4db7c70a30997e48..009e6c98754e484e489ebcdde361e200ee0d6205 100644 (file)
@@ -2492,7 +2492,7 @@ static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
        struct snd_pcm_substream *substream;
        const struct snd_pcm_chmap_elem *map;
 
-       if (snd_BUG_ON(!info->chmap))
+       if (!info->chmap)
                return -EINVAL;
        substream = snd_pcm_chmap_substream(info, idx);
        if (!substream)
@@ -2524,7 +2524,7 @@ static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
        unsigned int __user *dst;
        int c, count = 0;
 
-       if (snd_BUG_ON(!info->chmap))
+       if (!info->chmap)
                return -EINVAL;
        if (size < 8)
                return -ENOMEM;
index 9e6f54f8c45d2330ee339a7f4c7d1ac86c8e4774..1e26854b3425e23bd9d2942194f3665fa37e06cf 100644 (file)
@@ -682,7 +682,9 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
                cycle = increment_cycle_count(cycle, 1);
                if (s->handle_packet(s, 0, cycle, i) < 0) {
                        s->packet_index = -1;
-                       amdtp_stream_pcm_abort(s);
+                       if (in_interrupt())
+                               amdtp_stream_pcm_abort(s);
+                       WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
                        return;
                }
        }
@@ -734,7 +736,9 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
        /* Queueing error or detecting invalid payload. */
        if (i < packets) {
                s->packet_index = -1;
-               amdtp_stream_pcm_abort(s);
+               if (in_interrupt())
+                       amdtp_stream_pcm_abort(s);
+               WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
                return;
        }
 
index 7e88317228212a63ad38e6e9880655f6ea567c85..ea1a91e99875e1f06933142d607a8b9880d210b1 100644 (file)
@@ -135,7 +135,7 @@ struct amdtp_stream {
        /* For a PCM substream processing. */
        struct snd_pcm_substream *pcm;
        struct tasklet_struct period_tasklet;
-       unsigned int pcm_buffer_pointer;
+       snd_pcm_uframes_t pcm_buffer_pointer;
        unsigned int pcm_period_pointer;
 
        /* To wait for first packet. */
index 1770f085c2a694a398e2b30312cb79d6c3fd617a..01eb1dc7b5b3b5cf3c070a7656b81452890781e0 100644 (file)
@@ -370,10 +370,12 @@ enum {
 #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
 #define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_BXT_T(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x1a98)
 #define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
-                       IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)  || \
-                       IS_GLK(pci)
+#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
+                         IS_BXT_T(pci) || IS_KBL(pci) || IS_KBL_LP(pci) || \
+                         IS_KBL_H(pci) || IS_GLK(pci) || IS_CFL(pci))
 
 static char *driver_short_names[] = {
        [AZX_DRIVER_ICH] = "HDA Intel",
@@ -2378,6 +2380,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Kabylake-H */
        { PCI_DEVICE(0x8086, 0xa2f0),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+       /* Coffelake */
+       { PCI_DEVICE(0x8086, 0xa348),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE},
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
index e3f06672fd6df3268be67eb5790edcf91fa27585..e7d766d56c8e7dc60d12b39172f63c3a54f27b49 100644 (file)
@@ -21,8 +21,9 @@
 #include "skl.h"
 
 /* Unique identification for getting NHLT blobs */
-static u8 OSC_UUID[16] = {0x6E, 0x88, 0x9F, 0xA6, 0xEB, 0x6C, 0x94, 0x45,
-                               0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53};
+static guid_t osc_guid =
+       GUID_INIT(0xA69F886E, 0x6CEB, 0x4594,
+                 0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53);
 
 struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
 {
@@ -37,7 +38,7 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
                return NULL;
        }
 
-       obj = acpi_evaluate_dsm(handle, OSC_UUID, 1, 1, NULL);
+       obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL);
        if (obj && obj->type == ACPI_TYPE_BUFFER) {
                nhlt_ptr = (struct nhlt_resource_desc  *)obj->buffer.pointer;
                nhlt_table = (struct nhlt_acpi_table *)
index 84e7e698411e6a80a39050514227cbb90556ca13..a2670e9d652dfa09eebb7e53a8ed2b63a7cea152 100644 (file)
@@ -619,7 +619,7 @@ static int post_process_probe_trace_point(struct probe_trace_point *tp,
                                           struct map *map, unsigned long offs)
 {
        struct symbol *sym;
-       u64 addr = tp->address + tp->offset - offs;
+       u64 addr = tp->address - offs;
 
        sym = map__find_symbol(map, addr);
        if (!sym)
index 6437ef39aeea61fe592d11d5a022730a8b46a6dd..5fd5c5b8c7b8c1fc29597490abc827898f2fcb77 100644 (file)
@@ -26,6 +26,15 @@ union msr_pstate {
                unsigned res3:21;
                unsigned en:1;
        } bits;
+       struct {
+               unsigned fid:8;
+               unsigned did:6;
+               unsigned vid:8;
+               unsigned iddval:8;
+               unsigned idddiv:2;
+               unsigned res1:30;
+               unsigned en:1;
+       } fam17h_bits;
        unsigned long long val;
 };
 
@@ -35,6 +44,8 @@ static int get_did(int family, union msr_pstate pstate)
 
        if (family == 0x12)
                t = pstate.val & 0xf;
+       else if (family == 0x17)
+               t = pstate.fam17h_bits.did;
        else
                t = pstate.bits.did;
 
@@ -44,16 +55,20 @@ static int get_did(int family, union msr_pstate pstate)
 static int get_cof(int family, union msr_pstate pstate)
 {
        int t;
-       int fid, did;
+       int fid, did, cof;
 
        did = get_did(family, pstate);
-
-       t = 0x10;
-       fid = pstate.bits.fid;
-       if (family == 0x11)
-               t = 0x8;
-
-       return (100 * (fid + t)) >> did;
+       if (family == 0x17) {
+               fid = pstate.fam17h_bits.fid;
+               cof = 200 * fid / did;
+       } else {
+               t = 0x10;
+               fid = pstate.bits.fid;
+               if (family == 0x11)
+                       t = 0x8;
+               cof = (100 * (fid + t)) >> did;
+       }
+       return cof;
 }
 
 /* Needs:
index afb66f80554ecda25e139ddacb74d3a6443e8322..799a18be60aa4628b2dbaa47e8484a802769b558 100644 (file)
@@ -70,6 +70,8 @@ enum cpupower_cpu_vendor {X86_VENDOR_UNKNOWN = 0, X86_VENDOR_INTEL,
 #define CPUPOWER_CAP_IS_SNB            0x00000020
 #define CPUPOWER_CAP_INTEL_IDA         0x00000040
 
+#define CPUPOWER_AMD_CPBDIS            0x02000000
+
 #define MAX_HW_PSTATES 10
 
 struct cpupower_cpu_info {
index 1609243f5c64d16cb19f04fb1805b855c5c9fc57..601d719d4e60dfba6f4d104edbf4568691143cd5 100644 (file)
@@ -2,11 +2,14 @@
 
 #include "helpers/helpers.h"
 
+#define MSR_AMD_HWCR   0xc0010015
+
 int cpufreq_has_boost_support(unsigned int cpu, int *support, int *active,
                        int *states)
 {
        struct cpupower_cpu_info cpu_info;
        int ret;
+       unsigned long long val;
 
        *support = *active = *states = 0;
 
@@ -16,10 +19,22 @@ int cpufreq_has_boost_support(unsigned int cpu, int *support, int *active,
 
        if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_CBP) {
                *support = 1;
-               amd_pci_get_num_boost_states(active, states);
-               if (ret <= 0)
-                       return ret;
-               *support = 1;
+
+               /* AMD Family 0x17 does not utilize PCI D18F4 like prior
+                * families and has no fixed discrete boost states but
+                * has Hardware determined variable increments instead.
+                */
+
+               if (cpu_info.family == 0x17) {
+                       if (!read_msr(cpu, MSR_AMD_HWCR, &val)) {
+                               if (!(val & CPUPOWER_AMD_CPBDIS))
+                                       *active = 1;
+                       }
+               } else {
+                       ret = amd_pci_get_num_boost_states(active, states);
+                       if (ret)
+                               return ret;
+               }
        } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_INTEL_IDA)
                *support = *active = 1;
        return 0;
index b11294730771bed6766f77138460813f8abbfce8..0dafba2c1e7d28c4eda6904274baf7537ee3062c 100644 (file)
@@ -57,7 +57,6 @@ unsigned int list_header_only;
 unsigned int dump_only;
 unsigned int do_snb_cstates;
 unsigned int do_knl_cstates;
-unsigned int do_skl_residency;
 unsigned int do_slm_cstates;
 unsigned int use_c1_residency_msr;
 unsigned int has_aperf;
@@ -93,6 +92,7 @@ unsigned int do_ring_perf_limit_reasons;
 unsigned int crystal_hz;
 unsigned long long tsc_hz;
 int base_cpu;
+int do_migrate;
 double discover_bclk(unsigned int family, unsigned int model);
 unsigned int has_hwp;  /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
                        /* IA32_HWP_REQUEST, IA32_HWP_STATUS */
@@ -151,6 +151,8 @@ size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size;
 #define MAX_ADDED_COUNTERS 16
 
 struct thread_data {
+       struct timeval tv_begin;
+       struct timeval tv_end;
        unsigned long long tsc;
        unsigned long long aperf;
        unsigned long long mperf;
@@ -301,6 +303,9 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
 
 int cpu_migrate(int cpu)
 {
+       if (!do_migrate)
+               return 0;
+
        CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
        CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
        if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
@@ -384,8 +389,14 @@ struct msr_counter bic[] = {
        { 0x0, "CPU" },
        { 0x0, "Mod%c6" },
        { 0x0, "sysfs" },
+       { 0x0, "Totl%C0" },
+       { 0x0, "Any%C0" },
+       { 0x0, "GFX%C0" },
+       { 0x0, "CPUGFX%" },
 };
 
+
+
 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
 #define        BIC_Package     (1ULL << 0)
 #define        BIC_Avg_MHz     (1ULL << 1)
@@ -426,6 +437,10 @@ struct msr_counter bic[] = {
 #define        BIC_CPU         (1ULL << 36)
 #define        BIC_Mod_c6      (1ULL << 37)
 #define        BIC_sysfs       (1ULL << 38)
+#define        BIC_Totl_c0     (1ULL << 39)
+#define        BIC_Any_c0      (1ULL << 40)
+#define        BIC_GFX_c0      (1ULL << 41)
+#define        BIC_CPUGFX      (1ULL << 42)
 
 unsigned long long bic_enabled = 0xFFFFFFFFFFFFFFFFULL;
 unsigned long long bic_present = BIC_sysfs;
@@ -521,6 +536,8 @@ void print_header(char *delim)
        struct msr_counter *mp;
        int printed = 0;
 
+       if (debug)
+               outp += sprintf(outp, "usec %s", delim);
        if (DO_BIC(BIC_Package))
                outp += sprintf(outp, "%sPackage", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Core))
@@ -599,12 +616,14 @@ void print_header(char *delim)
        if (DO_BIC(BIC_GFXMHz))
                outp += sprintf(outp, "%sGFXMHz", (printed++ ? delim : ""));
 
-       if (do_skl_residency) {
+       if (DO_BIC(BIC_Totl_c0))
                outp += sprintf(outp, "%sTotl%%C0", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_Any_c0))
                outp += sprintf(outp, "%sAny%%C0", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_GFX_c0))
                outp += sprintf(outp, "%sGFX%%C0", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_CPUGFX))
                outp += sprintf(outp, "%sCPUGFX%%", (printed++ ? delim : ""));
-       }
 
        if (DO_BIC(BIC_Pkgpc2))
                outp += sprintf(outp, "%sPkg%%pc2", (printed++ ? delim : ""));
@@ -771,6 +790,14 @@ int format_counters(struct thread_data *t, struct core_data *c,
                (cpu_subset && !CPU_ISSET_S(t->cpu_id, cpu_subset_size, cpu_subset)))
                return 0;
 
+       if (debug) {
+               /* on each row, print how many usec each timestamp took to gather */
+               struct timeval tv;
+
+               timersub(&t->tv_end, &t->tv_begin, &tv);
+               outp += sprintf(outp, "%5ld\t", tv.tv_sec * 1000000 + tv.tv_usec);
+       }
+
        interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
 
        tsc = t->tsc * tsc_tweak;
@@ -912,12 +939,14 @@ int format_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->gfx_mhz);
 
        /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
-       if (do_skl_residency) {
+       if (DO_BIC(BIC_Totl_c0))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0/tsc);
+       if (DO_BIC(BIC_Any_c0))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_core_c0/tsc);
+       if (DO_BIC(BIC_GFX_c0))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_gfxe_c0/tsc);
+       if (DO_BIC(BIC_CPUGFX))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_both_core_gfxe_c0/tsc);
-       }
 
        if (DO_BIC(BIC_Pkgpc2))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc2/tsc);
@@ -1038,12 +1067,16 @@ delta_package(struct pkg_data *new, struct pkg_data *old)
        int i;
        struct msr_counter *mp;
 
-       if (do_skl_residency) {
+
+       if (DO_BIC(BIC_Totl_c0))
                old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0;
+       if (DO_BIC(BIC_Any_c0))
                old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0;
+       if (DO_BIC(BIC_GFX_c0))
                old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0;
+       if (DO_BIC(BIC_CPUGFX))
                old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0;
-       }
+
        old->pc2 = new->pc2 - old->pc2;
        if (DO_BIC(BIC_Pkgpc3))
                old->pc3 = new->pc3 - old->pc3;
@@ -1292,12 +1325,14 @@ int sum_counters(struct thread_data *t, struct core_data *c,
        if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
                return 0;
 
-       if (do_skl_residency) {
+       if (DO_BIC(BIC_Totl_c0))
                average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
+       if (DO_BIC(BIC_Any_c0))
                average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
+       if (DO_BIC(BIC_GFX_c0))
                average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
+       if (DO_BIC(BIC_CPUGFX))
                average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
-       }
 
        average.packages.pc2 += p->pc2;
        if (DO_BIC(BIC_Pkgpc3))
@@ -1357,12 +1392,14 @@ void compute_average(struct thread_data *t, struct core_data *c,
        average.cores.c7 /= topo.num_cores;
        average.cores.mc6_us /= topo.num_cores;
 
-       if (do_skl_residency) {
+       if (DO_BIC(BIC_Totl_c0))
                average.packages.pkg_wtd_core_c0 /= topo.num_packages;
+       if (DO_BIC(BIC_Any_c0))
                average.packages.pkg_any_core_c0 /= topo.num_packages;
+       if (DO_BIC(BIC_GFX_c0))
                average.packages.pkg_any_gfxe_c0 /= topo.num_packages;
+       if (DO_BIC(BIC_CPUGFX))
                average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages;
-       }
 
        average.packages.pc2 /= topo.num_packages;
        if (DO_BIC(BIC_Pkgpc3))
@@ -1482,6 +1519,9 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        struct msr_counter *mp;
        int i;
 
+
+       gettimeofday(&t->tv_begin, (struct timezone *)NULL);
+
        if (cpu_migrate(cpu)) {
                fprintf(outf, "Could not migrate to CPU %d\n", cpu);
                return -1;
@@ -1565,7 +1605,7 @@ retry:
 
        /* collect core counters only for 1st thread in core */
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
-               return 0;
+               goto done;
 
        if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates) {
                if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
@@ -1601,15 +1641,21 @@ retry:
 
        /* collect package counters only for 1st core in package */
        if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
-               return 0;
+               goto done;
 
-       if (do_skl_residency) {
+       if (DO_BIC(BIC_Totl_c0)) {
                if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
                        return -10;
+       }
+       if (DO_BIC(BIC_Any_c0)) {
                if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
                        return -11;
+       }
+       if (DO_BIC(BIC_GFX_c0)) {
                if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
                        return -12;
+       }
+       if (DO_BIC(BIC_CPUGFX)) {
                if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
                        return -13;
        }
@@ -1688,6 +1734,8 @@ retry:
                if (get_mp(cpu, mp, &p->counter[i]))
                        return -10;
        }
+done:
+       gettimeofday(&t->tv_end, (struct timezone *)NULL);
 
        return 0;
 }
@@ -3895,6 +3943,9 @@ void decode_misc_enable_msr(void)
 {
        unsigned long long msr;
 
+       if (!genuine_intel)
+               return;
+
        if (!get_msr(base_cpu, MSR_IA32_MISC_ENABLE, &msr))
                fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%sTCC %sEIST %sMWAIT %sPREFETCH %sTURBO)\n",
                        base_cpu, msr,
@@ -4198,7 +4249,12 @@ void process_cpuid()
                BIC_PRESENT(BIC_Pkgpc10);
        }
        do_irtl_hsw = has_hsw_msrs(family, model);
-       do_skl_residency = has_skl_msrs(family, model);
+       if (has_skl_msrs(family, model)) {
+               BIC_PRESENT(BIC_Totl_c0);
+               BIC_PRESENT(BIC_Any_c0);
+               BIC_PRESENT(BIC_GFX_c0);
+               BIC_PRESENT(BIC_CPUGFX);
+       }
        do_slm_cstates = is_slm(family, model);
        do_knl_cstates  = is_knl(family, model);
 
@@ -4578,7 +4634,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 17.04.12"
+       fprintf(outf, "turbostat version 17.06.23"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -4951,6 +5007,7 @@ void cmdline(int argc, char **argv)
                {"hide",        required_argument,      0, 'H'},        // meh, -h taken by --help
                {"Joules",      no_argument,            0, 'J'},
                {"list",        no_argument,            0, 'l'},
+               {"migrate",     no_argument,            0, 'm'},
                {"out",         required_argument,      0, 'o'},
                {"quiet",       no_argument,            0, 'q'},
                {"show",        required_argument,      0, 's'},
@@ -4962,7 +5019,7 @@ void cmdline(int argc, char **argv)
 
        progname = argv[0];
 
-       while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v",
+       while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:Jmo:qST:v",
                                long_options, &option_index)) != -1) {
                switch (opt) {
                case 'a':
@@ -5005,6 +5062,9 @@ void cmdline(int argc, char **argv)
                        list_header_only++;
                        quiet++;
                        break;
+               case 'm':
+                       do_migrate = 1;
+                       break;
                case 'o':
                        outf = fopen_or_die(optarg, "w");
                        break;
index 971c9ffdcb504ad8172758849aa5f0156f010bd9..a711eec0c8953f6c8693de17f7c6dc382c3e262a 100644 (file)
@@ -1,10 +1,27 @@
-DESTDIR ?=
+CC             = $(CROSS_COMPILE)gcc
+BUILD_OUTPUT    := $(CURDIR)
+PREFIX         := /usr
+DESTDIR                :=
+
+ifeq ("$(origin O)", "command line")
+       BUILD_OUTPUT := $(O)
+endif
 
 x86_energy_perf_policy : x86_energy_perf_policy.c
+CFLAGS +=      -Wall
+CFLAGS +=      -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
+
+%: %.c
+       @mkdir -p $(BUILD_OUTPUT)
+       $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@
 
+.PHONY : clean
 clean :
-       rm -f x86_energy_perf_policy
+       @rm -f $(BUILD_OUTPUT)/x86_energy_perf_policy
+
+install : x86_energy_perf_policy
+       install -d  $(DESTDIR)$(PREFIX)/bin
+       install $(BUILD_OUTPUT)/x86_energy_perf_policy $(DESTDIR)$(PREFIX)/bin/x86_energy_perf_policy
+       install -d  $(DESTDIR)$(PREFIX)/share/man/man8
+       install x86_energy_perf_policy.8 $(DESTDIR)$(PREFIX)/share/man/man8
 
-install :
-       install x86_energy_perf_policy ${DESTDIR}/usr/bin/
-       install x86_energy_perf_policy.8 ${DESTDIR}/usr/share/man/man8/
index 8eaaad648cdb92743e373adaacb8a16bbdc8f551..17db1c3af4d0bb3901482bbfed9e2cff8bc8cb6f 100644 (file)
-.\"  This page Copyright (C) 2010 Len Brown <len.brown@intel.com>
+.\"  This page Copyright (C) 2010 - 2015 Len Brown <len.brown@intel.com>
 .\"  Distributed under the GPL, Copyleft 1994.
 .TH X86_ENERGY_PERF_POLICY 8
 .SH NAME
-x86_energy_perf_policy \- read or write MSR_IA32_ENERGY_PERF_BIAS
+x86_energy_perf_policy \- Manage Energy vs. Performance Policy via x86 Model Specific Registers
 .SH SYNOPSIS
-.ft B
 .B x86_energy_perf_policy
-.RB [ "\-c cpu" ]
-.RB [ "\-v" ]
-.RB "\-r"
+.RB "[ options ] [ scope ] [field \ value]"
 .br
-.B x86_energy_perf_policy
-.RB [ "\-c cpu" ]
-.RB [ "\-v" ]
-.RB 'performance'
+.RB "scope: \-\-cpu\ cpu-list | \-\-pkg\ pkg-list"
 .br
-.B x86_energy_perf_policy
-.RB [ "\-c cpu" ]
-.RB [ "\-v" ]
-.RB 'normal'
+.RB "cpu-list, pkg-list: # | #,# | #-# | all"
 .br
-.B x86_energy_perf_policy
-.RB [ "\-c cpu" ]
-.RB [ "\-v" ]
-.RB 'powersave'
+.RB "field: \-\-all | \-\-epb | \-\-hwp-epp | \-\-hwp-min | \-\-hwp-max | \-\-hwp-desired"
 .br
-.B x86_energy_perf_policy
-.RB [ "\-c cpu" ]
-.RB [ "\-v" ]
-.RB n
+.RB "other: (\-\-force | \-\-hwp-enable | \-\-turbo-enable) value"
 .br
+.RB "value: # | default | performance | balance-performance | balance-power | power"
 .SH DESCRIPTION
 \fBx86_energy_perf_policy\fP
-allows software to convey
-its policy for the relative importance of performance
-versus energy savings to the processor.
+displays and updates energy-performance policy settings specific to
+Intel Architecture Processors.  Settings are accessed via Model Specific Register (MSR)
+updates, no matter if the Linux cpufreq sub-system is enabled or not.
 
-The processor uses this information in model-specific ways
-when it must select trade-offs between performance and
-energy efficiency.
+Policy in MSR_IA32_ENERGY_PERF_BIAS (EPB)
+may affect a wide range of hardware decisions,
+such as how aggressively the hardware enters and exits CPU idle states (C-states)
+and Processor Performance States (P-states).
+This policy hint does not replace explicit OS C-state and P-state selection.
+Rather, it tells the hardware how aggressively to implement those selections.
+Further, it allows the OS to influence energy/performance trade-offs where there
+is no software interface, such as in the opportunistic "turbo-mode" P-state range.
+Note that MSR_IA32_ENERGY_PERF_BIAS is defined per CPU,
+but some implementations
+share a single MSR among all CPUs in each processor package.
+On those systems, a write to EPB on one processor will
+be visible, and will have an effect, on all CPUs
+in the same processor package.
 
-This policy hint does not supersede Processor Performance states
-(P-states) or CPU Idle power states (C-states), but allows
-software to have influence where it would otherwise be unable
-to express a preference.
+Hardware P-States (HWP) are effectively an expansion of hardware
+P-state control from the opportunistic turbo-mode P-state range
+to include the entire range of available P-states.
+On Broadwell Xeon, the initial HWP implementation, EPB influenced HWP.
+That influence was removed in subsequent generations,
+where it was moved to the
+Energy_Performance_Preference (EPP) field in
+a pair of dedicated MSRs -- MSR_IA32_HWP_REQUEST and MSR_IA32_HWP_REQUEST_PKG.
 
-For example, this setting may tell the hardware how
-aggressively or conservatively to control frequency
-in the "turbo range" above the explicitly OS-controlled
-P-state frequency range.  It may also tell the hardware
-how aggressively is should enter the OS requested C-states.
+EPP is the most commonly managed knob in HWP mode,
+but MSR_IA32_HWP_REQUEST also allows the user to specify
+minimum-frequency for Quality-of-Service,
+and maximum-frequency for power-capping.
+MSR_IA32_HWP_REQUEST is defined per-CPU.
 
-Support for this feature is indicated by CPUID.06H.ECX.bit3
-per the Intel Architectures Software Developer's Manual.
+MSR_IA32_HWP_REQUEST_PKG has the same capability as MSR_IA32_HWP_REQUEST,
+but it can simultaneously set the default policy for all CPUs within a package.
+A bit in per-CPU MSR_IA32_HWP_REQUEST indicates whether it is
+over-ruled-by or exempt-from MSR_IA32_HWP_REQUEST_PKG.
 
-.SS Options
-\fB-c\fP limits operation to a single CPU.
-The default is to operate on all CPUs.
-Note that MSR_IA32_ENERGY_PERF_BIAS is defined per
-logical processor, but that the initial implementations
-of the MSR were shared among all processors in each package.
-.PP
-\fB-v\fP increases verbosity.  By default
-x86_energy_perf_policy is silent.
-.PP
-\fB-r\fP is for "read-only" mode - the unchanged state
-is read and displayed.
+MSR_HWP_CAPABILITIES shows the default values for the fields
+in MSR_IA32_HWP_REQUEST.  It is displayed when no values
+are being written.
+
+.SS SCOPE OPTIONS
 .PP
-.I performance
-Set a policy where performance is paramount.
-The processor will be unwilling to sacrifice any performance
-for the sake of energy saving. This is the hardware default.
+\fB-c, --cpu\fP Operate on the MSR_IA32_HWP_REQUEST for each CPU in a CPU-list.
+The CPU-list may be comma-separated CPU numbers, with dash for range
+or the string "all".  Eg. '--cpu 1,4,6-8' or '--cpu all'.
+When --cpu is used, \fB--hwp-use-pkg\fP is available, which specifies whether the per-cpu
+MSR_IA32_HWP_REQUEST should be over-ruled by MSR_IA32_HWP_REQUEST_PKG (1),
+or exempt from MSR_IA32_HWP_REQUEST_PKG (0).
+
+\fB-p, --pkg\fP Operate on the MSR_IA32_HWP_REQUEST_PKG for each package in the package-list.
+The list is a string of individual package numbers separated
+by commas, and or ranges of package numbers separated by a dash,
+or the string "all".
+For example '--pkg 1,3' or '--pkg all'
+
+.SS VALUE OPTIONS
 .PP
-.I normal
+.I normal | default
 Set a policy with a normal balance between performance and energy efficiency.
 The processor will tolerate minor performance compromise
 for potentially significant energy savings.
-This reasonable default for most desktops and servers.
+This is a reasonable default for most desktops and servers.
+"default" is a synonym for "normal".
 .PP
-.I powersave
+.I performance
+Set a policy for maximum performance,
+accepting no performance sacrifice for the benefit of energy efficiency.
+.PP
+.I balance-performance
+Set a policy with a high priority on performance,
+but allowing some performance loss to benefit energy efficiency.
+.PP
+.I balance-power
+Set a policy where the performance and power are balanced.
+This is the default.
+.PP
+.I power
 Set a policy where the processor can accept
-a measurable performance hit to maximize energy efficiency.
+a measurable performance impact to maximize energy efficiency.
+
 .PP
-.I n
-Set MSR_IA32_ENERGY_PERF_BIAS to the specified number.
-The range of valid numbers is 0-15, where 0 is maximum
-performance and 15 is maximum energy efficiency.
+The following table shows the mapping from the value strings above to actual MSR values.
+This mapping is defined in the Linux-kernel header, msr-index.h.
 
+.nf
+VALUE STRING           EPB     EPP
+performance            0       0
+balance-performance    4       128
+normal, default                6       128
+balance-power          8       192
+power                  15      255
+.fi
+.PP
+For MSR_IA32_HWP_REQUEST performance fields
+(--hwp-min, --hwp-max, --hwp-desired), the value option
+is in units of 100 MHz, Eg. 12 signifies 1200 MHz.
+
+.SS FIELD OPTIONS
+\fB-a, --all value-string\fP Sets all EPB and EPP and HWP limit fields to the value associated with
+the value-string.  In addition, enables turbo-mode and HWP-mode, if they were previously disabled.
+Thus "--all normal" will set a system without cpufreq into a well known configuration.
+.PP
+\fB-B, --epb\fP set EPB per-core or per-package.
+See value strings in the table above.
+.PP
+\fB-d, --debug\fP debug increases verbosity.  By default
+x86_energy_perf_policy is silent for updates,
+and verbose for read-only mode.
+.PP
+\fB-P, --hwp-epp\fP set HWP.EPP per-core or per-package.
+See value strings in the table above.
+.PP
+\fB-m, --hwp-min\fP request HWP to not go below the specified core/bus ratio.
+The "default" is the value found in IA32_HWP_CAPABILITIES.min.
+.PP
+\fB-M, --hwp-max\fP request HWP to not exceed the specified core/bus ratio.
+The "default" is the value found in IA32_HWP_CAPABILITIES.max.
+.PP
+\fB-D, --hwp-desired\fP request HWP 'desired' frequency.
+The "normal" setting is 0, which
+corresponds to 'full autonomous' HWP control.
+Non-zero performance values request a specific performance
+level on this processor, specified in multiples of 100 MHz.
+.PP
+\fB-w, --hwp-window\fP specify an integer number of microseconds
+in the sliding window that HWP uses to maintain average frequency.
+This parameter is meaningful only when the "desired" field above is non-zero.
+Default is 0, allowing the HW to choose.
+.SH OTHER OPTIONS
+.PP
+\fB-f, --force\fP writes the specified values without bounds checking.
+.PP
+\fB-U, --hwp-use-pkg\fP (0 | 1), when used in conjunction with --cpu,
+indicates whether the per-CPU MSR_IA32_HWP_REQUEST should be overruled (1)
+or exempt (0) from per-Package MSR_IA32_HWP_REQUEST_PKG settings.
+The default is exempt.
+.PP
+\fB-H, --hwp-enable\fP enable HardWare-P-state (HWP) mode.  Once enabled, system RESET is required to disable HWP mode.
+.PP
+\fB-t, --turbo-enable\fP enable (1) or disable (0) turbo mode.
+.PP
+\fB-v, --version\fP print version and exit.
+.PP
+If no request to change policy is made,
+the default behavior is to read
+and display the current system state,
+including the default capabilities.
+.SH WARNING
+.PP
+This utility writes directly to Model Specific Registers.
+There is no locking or coordination should this utility
+be used to modify HWP limit fields at the same time that
+intel_pstate's sysfs attributes access the same MSRs.
+.PP
+Note that --hwp-desired and --hwp-window are considered experimental.
+Future versions of Linux reserve the right to access these
+fields internally -- potentially conflicting with user-space access.
+.SH EXAMPLE
+.nf
+# sudo x86_energy_perf_policy
+cpu0: EPB 6
+cpu0: HWP_REQ: min 6 max 35 des 0 epp 128 window 0x0 (0*10^0us) use_pkg 0
+cpu0: HWP_CAP: low 1 eff 8 guar 27 high 35
+cpu1: EPB 6
+cpu1: HWP_REQ: min 6 max 35 des 0 epp 128 window 0x0 (0*10^0us) use_pkg 0
+cpu1: HWP_CAP: low 1 eff 8 guar 27 high 35
+cpu2: EPB 6
+cpu2: HWP_REQ: min 6 max 35 des 0 epp 128 window 0x0 (0*10^0us) use_pkg 0
+cpu2: HWP_CAP: low 1 eff 8 guar 27 high 35
+cpu3: EPB 6
+cpu3: HWP_REQ: min 6 max 35 des 0 epp 128 window 0x0 (0*10^0us) use_pkg 0
+cpu3: HWP_CAP: low 1 eff 8 guar 27 high 35
+.fi
 .SH NOTES
-.B "x86_energy_perf_policy "
+.B "x86_energy_perf_policy"
 runs only as root.
 .SH FILES
 .ta
 .nf
 /dev/cpu/*/msr
 .fi
-
 .SH "SEE ALSO"
+.nf
 msr(4)
+Intel(R) 64 and IA-32 Architectures Software Developer's Manual
+.fi
 .PP
 .SH AUTHORS
 .nf
-Written by Len Brown <len.brown@intel.com>
+Len Brown
index 40b3e5482f8ab745efdd39c58e5a6238ab6fc31b..65bbe627a425f6361d1249642ad29847abae0575 100644 (file)
  * policy preference bias on recent X86 processors.
  */
 /*
- * Copyright (c) 2010, Intel Corporation.
+ * Copyright (c) 2010 - 2017 Intel Corporation.
  * Len Brown <len.brown@intel.com>
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * This program is released under GPL v2
  */
 
+#define _GNU_SOURCE
+#include MSRHEADER
 #include <stdio.h>
 #include <unistd.h>
 #include <sys/types.h>
+#include <sched.h>
 #include <sys/stat.h>
 #include <sys/resource.h>
+#include <getopt.h>
+#include <err.h>
 #include <fcntl.h>
 #include <signal.h>
 #include <sys/time.h>
+#include <limits.h>
 #include <stdlib.h>
 #include <string.h>
+#include <cpuid.h>
+#include <errno.h>
+
+#define        OPTARG_NORMAL                   (INT_MAX - 1)
+#define        OPTARG_POWER                    (INT_MAX - 2)
+#define        OPTARG_BALANCE_POWER            (INT_MAX - 3)
+#define        OPTARG_BALANCE_PERFORMANCE      (INT_MAX - 4)
+#define        OPTARG_PERFORMANCE              (INT_MAX - 5)
+
+struct msr_hwp_cap {
+       unsigned char highest;
+       unsigned char guaranteed;
+       unsigned char efficient;
+       unsigned char lowest;
+};
 
-unsigned int verbose;          /* set with -v */
-unsigned int read_only;                /* set with -r */
+struct msr_hwp_request {
+       unsigned char hwp_min;
+       unsigned char hwp_max;
+       unsigned char hwp_desired;
+       unsigned char hwp_epp;
+       unsigned int hwp_window;
+       unsigned char hwp_use_pkg;
+} req_update;
+
+unsigned int debug;
+unsigned int verbose;
+unsigned int force;
 char *progname;
-unsigned long long new_bias;
-int cpu = -1;
+int base_cpu;
+unsigned char update_epb;
+unsigned long long new_epb;
+unsigned char turbo_is_enabled;
+unsigned char update_turbo;
+unsigned char turbo_update_value;
+unsigned char update_hwp_epp;
+unsigned char update_hwp_min;
+unsigned char update_hwp_max;
+unsigned char update_hwp_desired;
+unsigned char update_hwp_window;
+unsigned char update_hwp_use_pkg;
+unsigned char update_hwp_enable;
+#define hwp_update_enabled() (update_hwp_enable | update_hwp_epp | update_hwp_max | update_hwp_min | update_hwp_desired | update_hwp_window | update_hwp_use_pkg)
+int max_cpu_num;
+int max_pkg_num;
+#define MAX_PACKAGES 64
+unsigned int first_cpu_in_pkg[MAX_PACKAGES];
+unsigned long long pkg_present_set;
+unsigned long long pkg_selected_set;
+cpu_set_t *cpu_present_set;
+cpu_set_t *cpu_selected_set;
+int genuine_intel;
+
+size_t cpu_setsize;
+
+char *proc_stat = "/proc/stat";
+
+unsigned int has_epb;  /* MSR_IA32_ENERGY_PERF_BIAS */
+unsigned int has_hwp;  /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
+                       /* IA32_HWP_REQUEST, IA32_HWP_STATUS */
+unsigned int has_hwp_notify;           /* IA32_HWP_INTERRUPT */
+unsigned int has_hwp_activity_window;  /* IA32_HWP_REQUEST[bits 41:32] */
+unsigned int has_hwp_epp;      /* IA32_HWP_REQUEST[bits 31:24] */
+unsigned int has_hwp_request_pkg;      /* IA32_HWP_REQUEST_PKG */
+
+unsigned int bdx_highest_ratio;
 
 /*
- * Usage:
- *
- * -c cpu: limit action to a single CPU (default is all CPUs)
- * -v: verbose output (can invoke more than once)
- * -r: read-only, don't change any settings
- *
- *  performance
- *     Performance is paramount.
- *     Unwilling to sacrifice any performance
- *     for the sake of energy saving. (hardware default)
- *
- *  normal
- *     Can tolerate minor performance compromise
- *     for potentially significant energy savings.
- *     (reasonable default for most desktops and servers)
- *
- *  powersave
- *     Can tolerate significant performance hit
- *     to maximize energy savings.
- *
- * n
- *     a numerical value to write to the underlying MSR.
+ * maintain compatibility with original implementation, but don't document it:
  */
 void usage(void)
 {
-       printf("%s: [-c cpu] [-v] "
-               "(-r | 'performance' | 'normal' | 'powersave' | n)\n",
-               progname);
+       fprintf(stderr, "%s [options] [scope][field value]\n", progname);
+       fprintf(stderr, "scope: --cpu cpu-list [--hwp-use-pkg #] | --pkg pkg-list\n");
+       fprintf(stderr, "field: --all | --epb | --hwp-epp | --hwp-min | --hwp-max | --hwp-desired\n");
+       fprintf(stderr, "other: --hwp-enable | --turbo-enable (0 | 1) | --help | --force\n");
+       fprintf(stderr,
+               "value: ( # | \"normal\" | \"performance\" | \"balance-performance\" | \"balance-power\"| \"power\")\n");
+       fprintf(stderr, "--hwp-window usec\n");
+
+       fprintf(stderr, "Specify only Energy Performance BIAS (legacy usage):\n");
+       fprintf(stderr, "%s: [-c cpu] [-v] (-r | policy-value )\n", progname);
+
        exit(1);
 }
 
-#define MSR_IA32_ENERGY_PERF_BIAS      0x000001b0
+/*
+ * If bdx_highest_ratio is set,
+ * then we must translate between MSR format and simple ratio
+ * used on the cmdline.
+ */
+int ratio_2_msr_perf(int ratio)
+{
+       int msr_perf;
+
+       if (!bdx_highest_ratio)
+               return ratio;
+
+       msr_perf = ratio * 255 / bdx_highest_ratio;
+
+       if (debug)
+               fprintf(stderr, "%d = ratio_to_msr_perf(%d)\n", msr_perf, ratio);
+
+       return msr_perf;
+}
+int msr_perf_2_ratio(int msr_perf)
+{
+       int ratio;
+       double d;
+
+       if (!bdx_highest_ratio)
+               return msr_perf;
+
+       d = (double)msr_perf * (double) bdx_highest_ratio / 255.0;
+       d = d + 0.5;    /* round */
+       ratio = (int)d;
+
+       if (debug)
+               fprintf(stderr, "%d = msr_perf_ratio(%d) {%f}\n", ratio, msr_perf, d);
+
+       return ratio;
+}
+int parse_cmdline_epb(int i)
+{
+       if (!has_epb)
+               errx(1, "EPB not enabled on this platform");
+
+       update_epb = 1;
+
+       switch (i) {
+       case OPTARG_POWER:
+               return ENERGY_PERF_BIAS_POWERSAVE;
+       case OPTARG_BALANCE_POWER:
+               return ENERGY_PERF_BIAS_BALANCE_POWERSAVE;
+       case OPTARG_NORMAL:
+               return ENERGY_PERF_BIAS_NORMAL;
+       case OPTARG_BALANCE_PERFORMANCE:
+               return ENERGY_PERF_BIAS_BALANCE_PERFORMANCE;
+       case OPTARG_PERFORMANCE:
+               return ENERGY_PERF_BIAS_PERFORMANCE;
+       }
+       if (i < 0 || i > ENERGY_PERF_BIAS_POWERSAVE)
+               errx(1, "--epb must be from 0 to 15");
+       return i;
+}
+
+#define HWP_CAP_LOWEST 0
+#define HWP_CAP_HIGHEST 255
+
+/*
+ * "performance" changes hwp_min to cap.highest
+ * All others leave it at cap.lowest
+ */
+int parse_cmdline_hwp_min(int i)
+{
+       update_hwp_min = 1;
+
+       switch (i) {
+       case OPTARG_POWER:
+       case OPTARG_BALANCE_POWER:
+       case OPTARG_NORMAL:
+       case OPTARG_BALANCE_PERFORMANCE:
+               return HWP_CAP_LOWEST;
+       case OPTARG_PERFORMANCE:
+               return HWP_CAP_HIGHEST;
+       }
+       return i;
+}
+/*
+ * "power" changes hwp_max to cap.lowest
+ * All others leave it at cap.highest
+ */
+int parse_cmdline_hwp_max(int i)
+{
+       update_hwp_max = 1;
+
+       switch (i) {
+       case OPTARG_POWER:
+               return HWP_CAP_LOWEST;
+       case OPTARG_NORMAL:
+       case OPTARG_BALANCE_POWER:
+       case OPTARG_BALANCE_PERFORMANCE:
+       case OPTARG_PERFORMANCE:
+               return HWP_CAP_HIGHEST;
+       }
+       return i;
+}
+/*
+ * for --hwp-des, all strings leave it in autonomous mode
+ * If you want to change it, you need to explicitly pick a value
+ */
+int parse_cmdline_hwp_desired(int i)
+{
+       update_hwp_desired = 1;
+
+       switch (i) {
+       case OPTARG_POWER:
+       case OPTARG_BALANCE_POWER:
+       case OPTARG_BALANCE_PERFORMANCE:
+       case OPTARG_NORMAL:
+       case OPTARG_PERFORMANCE:
+               return 0;       /* autonomous */
+       }
+       return i;
+}
+
+int parse_cmdline_hwp_window(int i)
+{
+       unsigned int exponent;
+
+       update_hwp_window = 1;
+
+       switch (i) {
+       case OPTARG_POWER:
+       case OPTARG_BALANCE_POWER:
+       case OPTARG_NORMAL:
+       case OPTARG_BALANCE_PERFORMANCE:
+       case OPTARG_PERFORMANCE:
+               return 0;
+       }
+       if (i < 0 || i > 1270000000) {
+               fprintf(stderr, "--hwp-window: 0 for auto; 1 - 1270000000 usec for window duration\n");
+               usage();
+       }
+       for (exponent = 0; ; ++exponent) {
+               if (debug)
+                       printf("%d 10^%d\n", i, exponent);
+
+               if (i <= 127)
+                       break;
+
+               i = i / 10;
+       }
+       if (debug)
+               fprintf(stderr, "%d*10^%d: 0x%x\n", i, exponent, (exponent << 7) | i);
+
+       return (exponent << 7) | i;
+}
+int parse_cmdline_hwp_epp(int i)
+{
+       update_hwp_epp = 1;
+
+       switch (i) {
+       case OPTARG_POWER:
+               return HWP_EPP_POWERSAVE;
+       case OPTARG_BALANCE_POWER:
+               return HWP_EPP_BALANCE_POWERSAVE;
+       case OPTARG_NORMAL:
+       case OPTARG_BALANCE_PERFORMANCE:
+               return HWP_EPP_BALANCE_PERFORMANCE;
+       case OPTARG_PERFORMANCE:
+               return HWP_EPP_PERFORMANCE;
+       }
+       if (i < 0 || i > 0xff) {
+               fprintf(stderr, "--hwp-epp must be from 0 to 0xff\n");
+               usage();
+       }
+       return i;
+}
+int parse_cmdline_turbo(int i)
+{
+       update_turbo = 1;
+
+       switch (i) {
+       case OPTARG_POWER:
+               return 0;
+       case OPTARG_NORMAL:
+       case OPTARG_BALANCE_POWER:
+       case OPTARG_BALANCE_PERFORMANCE:
+       case OPTARG_PERFORMANCE:
+               return 1;
+       }
+       if (i < 0 || i > 1) {
+               fprintf(stderr, "--turbo-enable: 1 to enable, 0 to disable\n");
+               usage();
+       }
+       return i;
+}
+
+int parse_optarg_string(char *s)
+{
+       int i;
+       char *endptr;
+
+       if (!strncmp(s, "default", 7))
+               return OPTARG_NORMAL;
+
+       if (!strncmp(s, "normal", 6))
+               return OPTARG_NORMAL;
+
+       if (!strncmp(s, "power", 9))
+               return OPTARG_POWER;
+
+       if (!strncmp(s, "balance-power", 17))
+               return OPTARG_BALANCE_POWER;
+
+       if (!strncmp(s, "balance-performance", 19))
+               return OPTARG_BALANCE_PERFORMANCE;
+
+       if (!strncmp(s, "performance", 11))
+               return OPTARG_PERFORMANCE;
+
+       i = strtol(s, &endptr, 0);
+       if (s == endptr) {
+               fprintf(stderr, "no digits in \"%s\"\n", s);
+               usage();
+       }
+       if (i == LONG_MIN || i == LONG_MAX)
+               errx(-1, "%s", s);
+
+       if (i > 0xFF)
+               errx(-1, "%d (0x%x) must be < 256", i, i);
+
+       if (i < 0)
+               errx(-1, "%d (0x%x) must be >= 0", i, i);
+       return i;
+}
+
+void parse_cmdline_all(char *s)
+{
+       force++;
+       update_hwp_enable = 1;
+       req_update.hwp_min = parse_cmdline_hwp_min(parse_optarg_string(s));
+       req_update.hwp_max = parse_cmdline_hwp_max(parse_optarg_string(s));
+       req_update.hwp_epp = parse_cmdline_hwp_epp(parse_optarg_string(s));
+       if (has_epb)
+               new_epb = parse_cmdline_epb(parse_optarg_string(s));
+       turbo_update_value = parse_cmdline_turbo(parse_optarg_string(s));
+       req_update.hwp_desired = parse_cmdline_hwp_desired(parse_optarg_string(s));
+       req_update.hwp_window = parse_cmdline_hwp_window(parse_optarg_string(s));
+}
+
+void validate_cpu_selected_set(void)
+{
+       int cpu;
+
+       if (CPU_COUNT_S(cpu_setsize, cpu_selected_set) == 0)
+               errx(0, "no CPUs requested");
+
+       for (cpu = 0; cpu <= max_cpu_num; ++cpu) {
+               if (CPU_ISSET_S(cpu, cpu_setsize, cpu_selected_set))
+                       if (!CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
+                               errx(1, "Requested cpu% is not present", cpu);
+       }
+}
+
+void parse_cmdline_cpu(char *s)
+{
+       char *startp, *endp;
+       int cpu = 0;
+
+       if (pkg_selected_set) {
+               usage();
+               errx(1, "--cpu | --pkg");
+       }
+       cpu_selected_set = CPU_ALLOC((max_cpu_num + 1));
+       if (cpu_selected_set == NULL)
+               err(1, "cpu_selected_set");
+       CPU_ZERO_S(cpu_setsize, cpu_selected_set);
+
+       for (startp = s; startp && *startp;) {
+
+               if (*startp == ',') {
+                       startp++;
+                       continue;
+               }
+
+               if (*startp == '-') {
+                       int end_cpu;
 
-#define        BIAS_PERFORMANCE                0
-#define BIAS_BALANCE                   6
-#define        BIAS_POWERSAVE                  15
+                       startp++;
+                       end_cpu = strtol(startp, &endp, 10);
+                       if (startp == endp)
+                               continue;
+
+                       while (cpu <= end_cpu) {
+                               if (cpu > max_cpu_num)
+                                       errx(1, "Requested cpu%d exceeds max cpu%d", cpu, max_cpu_num);
+                               CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
+                               cpu++;
+                       }
+                       startp = endp;
+                       continue;
+               }
+
+               if (strncmp(startp, "all", 3) == 0) {
+                       for (cpu = 0; cpu <= max_cpu_num; cpu += 1) {
+                               if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
+                                       CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
+                       }
+                       startp += 3;
+                       if (*startp == 0)
+                               break;
+               }
+               /* "--cpu even" is not documented */
+               if (strncmp(startp, "even", 4) == 0) {
+                       for (cpu = 0; cpu <= max_cpu_num; cpu += 2) {
+                               if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
+                                       CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
+                       }
+                       startp += 4;
+                       if (*startp == 0)
+                               break;
+               }
+
+               /* "--cpu odd" is not documented */
+               if (strncmp(startp, "odd", 3) == 0) {
+                       for (cpu = 1; cpu <= max_cpu_num; cpu += 2) {
+                               if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
+                                       CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
+                       }
+                       startp += 3;
+                       if (*startp == 0)
+                               break;
+               }
+
+               cpu = strtol(startp, &endp, 10);
+               if (startp == endp)
+                       errx(1, "--cpu cpu-set: confused by '%s'", startp);
+               if (cpu > max_cpu_num)
+                       errx(1, "Requested cpu%d exceeds max cpu%d", cpu, max_cpu_num);
+               CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
+               startp = endp;
+       }
+
+       validate_cpu_selected_set();
+
+}
+
+void parse_cmdline_pkg(char *s)
+{
+       char *startp, *endp;
+       int pkg = 0;
+
+       if (cpu_selected_set) {
+               usage();
+               errx(1, "--pkg | --cpu");
+       }
+       pkg_selected_set = 0;
+
+       for (startp = s; startp && *startp;) {
+
+               if (*startp == ',') {
+                       startp++;
+                       continue;
+               }
+
+               if (*startp == '-') {
+                       int end_pkg;
+
+                       startp++;
+                       end_pkg = strtol(startp, &endp, 10);
+                       if (startp == endp)
+                               continue;
+
+                       while (pkg <= end_pkg) {
+                               if (pkg > max_pkg_num)
+                                       errx(1, "Requested pkg%d exceeds max pkg%d", pkg, max_pkg_num);
+                               pkg_selected_set |= 1ULL << pkg;
+                               pkg++;
+                       }
+                       startp = endp;
+                       continue;
+               }
+
+               if (strncmp(startp, "all", 3) == 0) {
+                       pkg_selected_set = pkg_present_set;
+                       return;
+               }
+
+               pkg = strtol(startp, &endp, 10);
+               if (startp == endp)
+                       errx(1, "--pkg pkg-set: confused by '%s'", startp);
+               if (pkg > max_pkg_num)
+                       errx(1, "Requested pkg%d exceeds max pkg%d", pkg, max_pkg_num);
+               pkg_selected_set |= 1ULL << pkg;
+               startp = endp;
+       }
+}
+
+void for_packages(unsigned long long pkg_set, int (func)(int))
+{
+       int pkg_num;
+
+       for (pkg_num = 0; pkg_num <= max_pkg_num; ++pkg_num) {
+               if (pkg_set & (1ULL << pkg_num))
+                       func(pkg_num);
+       }
+}
+
+void print_version(void)
+{
+       printf("x86_energy_perf_policy 17.05.11 (C) Len Brown <len.brown@intel.com>\n");
+}
 
 void cmdline(int argc, char **argv)
 {
        int opt;
+       int option_index = 0;
+
+       static struct option long_options[] = {
+               {"all",         required_argument,      0, 'a'},
+               {"cpu",         required_argument,      0, 'c'},
+               {"pkg",         required_argument,      0, 'p'},
+               {"debug",       no_argument,            0, 'd'},
+               {"hwp-desired", required_argument,      0, 'D'},
+               {"epb", required_argument,      0, 'B'},
+               {"force",       no_argument,    0, 'f'},
+               {"hwp-enable",  no_argument,    0, 'e'},
+               {"help",        no_argument,    0, 'h'},
+               {"hwp-epp",     required_argument,      0, 'P'},
+               {"hwp-min",     required_argument,      0, 'm'},
+               {"hwp-max",     required_argument,      0, 'M'},
+               {"read",        no_argument,            0, 'r'},
+               {"turbo-enable",        required_argument,      0, 't'},
+               {"hwp-use-pkg", required_argument,      0, 'u'},
+               {"version",     no_argument,            0, 'v'},
+               {"hwp-window",  required_argument,      0, 'w'},
+               {0,             0,                      0, 0 }
+       };
 
        progname = argv[0];
 
-       while ((opt = getopt(argc, argv, "+rvc:")) != -1) {
+       while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:efm:M:rt:u:vw:",
+                               long_options, &option_index)) != -1) {
                switch (opt) {
+               case 'a':
+                       parse_cmdline_all(optarg);
+                       break;
+               case 'B':
+                       new_epb = parse_cmdline_epb(parse_optarg_string(optarg));
+                       break;
                case 'c':
-                       cpu = atoi(optarg);
+                       parse_cmdline_cpu(optarg);
+                       break;
+               case 'e':
+                       update_hwp_enable = 1;
+                       break;
+               case 'h':
+                       usage();
+                       break;
+               case 'd':
+                       debug++;
+                       verbose++;
+                       break;
+               case 'f':
+                       force++;
+                       break;
+               case 'D':
+                       req_update.hwp_desired = parse_cmdline_hwp_desired(parse_optarg_string(optarg));
+                       break;
+               case 'm':
+                       req_update.hwp_min = parse_cmdline_hwp_min(parse_optarg_string(optarg));
+                       break;
+               case 'M':
+                       req_update.hwp_max = parse_cmdline_hwp_max(parse_optarg_string(optarg));
+                       break;
+               case 'p':
+                       parse_cmdline_pkg(optarg);
+                       break;
+               case 'P':
+                       req_update.hwp_epp = parse_cmdline_hwp_epp(parse_optarg_string(optarg));
                        break;
                case 'r':
-                       read_only = 1;
+                       /* v1 used -r to specify read-only mode, now the default */
+                       break;
+               case 't':
+                       turbo_update_value = parse_cmdline_turbo(parse_optarg_string(optarg));
+                       break;
+               case 'u':
+                       update_hwp_use_pkg++;
+                       if (atoi(optarg) == 0)
+                               req_update.hwp_use_pkg = 0;
+                       else
+                               req_update.hwp_use_pkg = 1;
                        break;
                case 'v':
-                       verbose++;
+                       print_version();
+                       exit(0);
+                       break;
+               case 'w':
+                       req_update.hwp_window = parse_cmdline_hwp_window(parse_optarg_string(optarg));
                        break;
                default:
                        usage();
                }
        }
-       /* if -r, then should be no additional optind */
-       if (read_only && (argc > optind))
-               usage();
-
        /*
-        * if no -r , then must be one additional optind
+        * v1 allowed "performance"|"normal"|"power" with no policy specifier
+        * to update BIAS.  Continue to support that, even though no longer documented.
         */
-       if (!read_only) {
+       if (argc == optind + 1)
+               new_epb = parse_cmdline_epb(parse_optarg_string(argv[optind]));
 
-               if (argc != optind + 1) {
-                       printf("must supply -r or policy param\n");
-                       usage();
-                       }
+       if (argc > optind + 1) {
+               fprintf(stderr, "stray parameter '%s'\n", argv[optind + 1]);
+               usage();
+       }
+}
 
-               if (!strcmp("performance", argv[optind])) {
-                       new_bias = BIAS_PERFORMANCE;
-               } else if (!strcmp("normal", argv[optind])) {
-                       new_bias = BIAS_BALANCE;
-               } else if (!strcmp("powersave", argv[optind])) {
-                       new_bias = BIAS_POWERSAVE;
-               } else {
-                       char *endptr;
-
-                       new_bias = strtoull(argv[optind], &endptr, 0);
-                       if (endptr == argv[optind] ||
-                               new_bias > BIAS_POWERSAVE) {
-                                       fprintf(stderr, "invalid value: %s\n",
-                                               argv[optind]);
-                               usage();
-                       }
-               }
+
+int get_msr(int cpu, int offset, unsigned long long *msr)
+{
+       int retval;
+       char pathname[32];
+       int fd;
+
+       sprintf(pathname, "/dev/cpu/%d/msr", cpu);
+       fd = open(pathname, O_RDONLY);
+       if (fd < 0)
+               err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
+
+       retval = pread(fd, msr, sizeof(*msr), offset);
+       if (retval != sizeof(*msr))
+               err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset);
+
+       if (debug > 1)
+               fprintf(stderr, "get_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, *msr);
+
+       close(fd);
+       return 0;
+}
+
+int put_msr(int cpu, int offset, unsigned long long new_msr)
+{
+       char pathname[32];
+       int retval;
+       int fd;
+
+       sprintf(pathname, "/dev/cpu/%d/msr", cpu);
+       fd = open(pathname, O_RDWR);
+       if (fd < 0)
+               err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
+
+       retval = pwrite(fd, &new_msr, sizeof(new_msr), offset);
+       if (retval != sizeof(new_msr))
+               err(-2, "pwrite(cpu%d, offset 0x%x, 0x%llx) = %d", cpu, offset, new_msr, retval);
+
+       close(fd);
+
+       if (debug > 1)
+               fprintf(stderr, "put_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, new_msr);
+
+       return 0;
+}
+
+void print_hwp_cap(int cpu, struct msr_hwp_cap *cap, char *str)
+{
+       if (cpu != -1)
+               printf("cpu%d: ", cpu);
+
+       printf("HWP_CAP: low %d eff %d guar %d high %d\n",
+               cap->lowest, cap->efficient, cap->guaranteed, cap->highest);
+}
+void read_hwp_cap(int cpu, struct msr_hwp_cap *cap, unsigned int msr_offset)
+{
+       unsigned long long msr;
+
+       get_msr(cpu, msr_offset, &msr);
+
+       cap->highest = msr_perf_2_ratio(HWP_HIGHEST_PERF(msr));
+       cap->guaranteed = msr_perf_2_ratio(HWP_GUARANTEED_PERF(msr));
+       cap->efficient = msr_perf_2_ratio(HWP_MOSTEFFICIENT_PERF(msr));
+       cap->lowest = msr_perf_2_ratio(HWP_LOWEST_PERF(msr));
+}
+
+void print_hwp_request(int cpu, struct msr_hwp_request *h, char *str)
+{
+       if (cpu != -1)
+               printf("cpu%d: ", cpu);
+
+       if (str)
+               printf("%s", str);
+
+       printf("HWP_REQ: min %d max %d des %d epp %d window 0x%x (%d*10^%dus) use_pkg %d\n",
+               h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp,
+               h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7, h->hwp_use_pkg);
+}
+void print_hwp_request_pkg(int pkg, struct msr_hwp_request *h, char *str)
+{
+       printf("pkg%d: ", pkg);
+
+       if (str)
+               printf("%s", str);
+
+       printf("HWP_REQ_PKG: min %d max %d des %d epp %d window 0x%x (%d*10^%dus)\n",
+               h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp,
+               h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7);
+}
+void read_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
+{
+       unsigned long long msr;
+
+       get_msr(cpu, msr_offset, &msr);
+
+       hwp_req->hwp_min = msr_perf_2_ratio((((msr) >> 0) & 0xff));
+       hwp_req->hwp_max = msr_perf_2_ratio((((msr) >> 8) & 0xff));
+       hwp_req->hwp_desired = msr_perf_2_ratio((((msr) >> 16) & 0xff));
+       hwp_req->hwp_epp = (((msr) >> 24) & 0xff);
+       hwp_req->hwp_window = (((msr) >> 32) & 0x3ff);
+       hwp_req->hwp_use_pkg = (((msr) >> 42) & 0x1);
+}
+
+void write_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
+{
+       unsigned long long msr = 0;
+
+       if (debug > 1)
+               printf("cpu%d: requesting min %d max %d des %d epp %d window 0x%0x use_pkg %d\n",
+                       cpu, hwp_req->hwp_min, hwp_req->hwp_max,
+                       hwp_req->hwp_desired, hwp_req->hwp_epp,
+                       hwp_req->hwp_window, hwp_req->hwp_use_pkg);
+
+       msr |= HWP_MIN_PERF(ratio_2_msr_perf(hwp_req->hwp_min));
+       msr |= HWP_MAX_PERF(ratio_2_msr_perf(hwp_req->hwp_max));
+       msr |= HWP_DESIRED_PERF(ratio_2_msr_perf(hwp_req->hwp_desired));
+       msr |= HWP_ENERGY_PERF_PREFERENCE(hwp_req->hwp_epp);
+       msr |= HWP_ACTIVITY_WINDOW(hwp_req->hwp_window);
+       msr |= HWP_PACKAGE_CONTROL(hwp_req->hwp_use_pkg);
+
+       put_msr(cpu, msr_offset, msr);
+}
+
+int print_cpu_msrs(int cpu)
+{
+       unsigned long long msr;
+       struct msr_hwp_request req;
+       struct msr_hwp_cap cap;
+
+       if (has_epb) {
+               get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr);
+
+               printf("cpu%d: EPB %u\n", cpu, (unsigned int) msr);
        }
+
+       if (!has_hwp)
+               return 0;
+
+       read_hwp_request(cpu, &req, MSR_HWP_REQUEST);
+       print_hwp_request(cpu, &req, "");
+
+       read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
+       print_hwp_cap(cpu, &cap, "");
+
+       return 0;
+}
+
+int print_pkg_msrs(int pkg)
+{
+       struct msr_hwp_request req;
+       unsigned long long msr;
+
+       if (!has_hwp)
+               return 0;
+
+       read_hwp_request(first_cpu_in_pkg[pkg], &req, MSR_HWP_REQUEST_PKG);
+       print_hwp_request_pkg(pkg, &req, "");
+
+       if (has_hwp_notify) {
+               get_msr(first_cpu_in_pkg[pkg], MSR_HWP_INTERRUPT, &msr);
+               fprintf(stderr,
+               "pkg%d: MSR_HWP_INTERRUPT: 0x%08llx (Excursion_Min-%sabled, Guaranteed_Perf_Change-%sabled)\n",
+               pkg, msr,
+               ((msr) & 0x2) ? "EN" : "Dis",
+               ((msr) & 0x1) ? "EN" : "Dis");
+       }
+       get_msr(first_cpu_in_pkg[pkg], MSR_HWP_STATUS, &msr);
+       fprintf(stderr,
+               "pkg%d: MSR_HWP_STATUS: 0x%08llx (%sExcursion_Min, %sGuaranteed_Perf_Change)\n",
+               pkg, msr,
+               ((msr) & 0x4) ? "" : "No-",
+               ((msr) & 0x1) ? "" : "No-");
+
+       return 0;
 }
 
 /*
- * validate_cpuid()
- * returns on success, quietly exits on failure (make verbose with -v)
+ * Assumption: All HWP systems have 100 MHz bus clock
  */
-void validate_cpuid(void)
+int ratio_2_sysfs_khz(int ratio)
 {
-       unsigned int eax, ebx, ecx, edx, max_level;
-       unsigned int fms, family, model, stepping;
+       int bclk_khz = 100 * 1000;      /* 100,000 KHz = 100 MHz */
 
-       eax = ebx = ecx = edx = 0;
+       return ratio * bclk_khz;
+}
+/*
+ * If HWP is enabled and cpufreq sysfs attributes are present,
+ * then update sysfs, so that it will not become
+ * stale when we write to MSRs.
+ * (intel_pstate's max_perf_pct and min_perf_pct will follow cpufreq,
+ *  so we don't have to touch that.)
+ */
+void update_cpufreq_scaling_freq(int is_max, int cpu, unsigned int ratio)
+{
+       char pathname[64];
+       FILE *fp;
+       int retval;
+       int khz;
 
-       asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx),
-               "=d" (edx) : "a" (0));
+       sprintf(pathname, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_%s_freq",
+               cpu, is_max ? "max" : "min");
 
-       if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) {
-               if (verbose)
-                       fprintf(stderr, "%.4s%.4s%.4s != GenuineIntel",
-                               (char *)&ebx, (char *)&edx, (char *)&ecx);
-               exit(1);
+       fp = fopen(pathname, "w");
+       if (!fp) {
+               if (debug)
+                       perror(pathname);
+               return;
        }
 
-       asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
-       family = (fms >> 8) & 0xf;
-       model = (fms >> 4) & 0xf;
-       stepping = fms & 0xf;
-       if (family == 6 || family == 0xf)
-               model += ((fms >> 16) & 0xf) << 4;
+       khz = ratio_2_sysfs_khz(ratio);
+       retval = fprintf(fp, "%d", khz);
+       if (retval < 0)
+               if (debug)
+                       perror("fprintf");
+       if (debug)
+               printf("echo %d > %s\n", khz, pathname);
 
-       if (verbose > 1)
-               printf("CPUID %d levels family:model:stepping "
-                       "0x%x:%x:%x (%d:%d:%d)\n", max_level,
-                       family, model, stepping, family, model, stepping);
+       fclose(fp);
+}
 
-       if (!(edx & (1 << 5))) {
-               if (verbose)
-                       printf("CPUID: no MSR\n");
-               exit(1);
+/*
+ * We update all sysfs before updating any MSRs because of
+ * bugs in cpufreq/intel_pstate where the sysfs writes
+ * for a CPU may change the min/max values on other CPUS.
+ */
+
+int update_sysfs(int cpu)
+{
+       if (!has_hwp)
+               return 0;
+
+       if (!hwp_update_enabled())
+               return 0;
+
+       if (access("/sys/devices/system/cpu/cpu0/cpufreq", F_OK))
+               return 0;
+
+       if (update_hwp_min)
+               update_cpufreq_scaling_freq(0, cpu, req_update.hwp_min);
+
+       if (update_hwp_max)
+               update_cpufreq_scaling_freq(1, cpu, req_update.hwp_max);
+
+       return 0;
+}
+
+int verify_hwp_req_self_consistency(int cpu, struct msr_hwp_request *req)
+{
+       /* fail if min > max requested */
+       if (req->hwp_min > req->hwp_max) {
+               errx(1, "cpu%d: requested hwp-min %d > hwp_max %d",
+                       cpu, req->hwp_min, req->hwp_max);
        }
 
-       /*
-        * Support for MSR_IA32_ENERGY_PERF_BIAS
-        * is indicated by CPUID.06H.ECX.bit3
-        */
-       asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (6));
-       if (verbose)
-               printf("CPUID.06H.ECX: 0x%x\n", ecx);
-       if (!(ecx & (1 << 3))) {
-               if (verbose)
-                       printf("CPUID: No MSR_IA32_ENERGY_PERF_BIAS\n");
-               exit(1);
+       /* fail if desired > max requested */
+       if (req->hwp_desired && (req->hwp_desired > req->hwp_max)) {
+               errx(1, "cpu%d: requested hwp-desired %d > hwp_max %d",
+                       cpu, req->hwp_desired, req->hwp_max);
        }
-       return; /* success */
+       /* fail if desired < min requested */
+       if (req->hwp_desired && (req->hwp_desired < req->hwp_min)) {
+               errx(1, "cpu%d: requested hwp-desired %d < requested hwp_min %d",
+                       cpu, req->hwp_desired, req->hwp_min);
+       }
+
+       return 0;
 }
 
-unsigned long long get_msr(int cpu, int offset)
+int check_hwp_request_v_hwp_capabilities(int cpu, struct msr_hwp_request *req, struct msr_hwp_cap *cap)
 {
-       unsigned long long msr;
-       char msr_path[32];
-       int retval;
-       int fd;
+       if (update_hwp_max) {
+               if (req->hwp_max > cap->highest)
+                       errx(1, "cpu%d: requested max %d > capabilities highest %d, use --force?",
+                               cpu, req->hwp_max, cap->highest);
+               if (req->hwp_max < cap->lowest)
+                       errx(1, "cpu%d: requested max %d < capabilities lowest %d, use --force?",
+                               cpu, req->hwp_max, cap->lowest);
+       }
 
-       sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
-       fd = open(msr_path, O_RDONLY);
-       if (fd < 0) {
-               printf("Try \"# modprobe msr\"\n");
-               perror(msr_path);
-               exit(1);
+       if (update_hwp_min) {
+               if (req->hwp_min > cap->highest)
+                       errx(1, "cpu%d: requested min %d > capabilities highest %d, use --force?",
+                               cpu, req->hwp_min, cap->highest);
+               if (req->hwp_min < cap->lowest)
+                       errx(1, "cpu%d: requested min %d < capabilities lowest %d, use --force?",
+                               cpu, req->hwp_min, cap->lowest);
        }
 
-       retval = pread(fd, &msr, sizeof msr, offset);
+       if (update_hwp_min && update_hwp_max && (req->hwp_min > req->hwp_max))
+               errx(1, "cpu%d: requested min %d > requested max %d",
+                       cpu, req->hwp_min, req->hwp_max);
 
-       if (retval != sizeof msr) {
-               printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
-               exit(-2);
+       if (update_hwp_desired && req->hwp_desired) {
+               if (req->hwp_desired > req->hwp_max)
+                       errx(1, "cpu%d: requested desired %d > requested max %d, use --force?",
+                               cpu, req->hwp_desired, req->hwp_max);
+               if (req->hwp_desired < req->hwp_min)
+                       errx(1, "cpu%d: requested desired %d < requested min %d, use --force?",
+                               cpu, req->hwp_desired, req->hwp_min);
+               if (req->hwp_desired < cap->lowest)
+                       errx(1, "cpu%d: requested desired %d < capabilities lowest %d, use --force?",
+                               cpu, req->hwp_desired, cap->lowest);
+               if (req->hwp_desired > cap->highest)
+                       errx(1, "cpu%d: requested desired %d > capabilities highest %d, use --force?",
+                               cpu, req->hwp_desired, cap->highest);
        }
-       close(fd);
-       return msr;
+
+       return 0;
 }
 
-unsigned long long  put_msr(int cpu, unsigned long long new_msr, int offset)
+int update_hwp_request(int cpu)
 {
-       unsigned long long old_msr;
-       char msr_path[32];
-       int retval;
-       int fd;
+       struct msr_hwp_request req;
+       struct msr_hwp_cap cap;
+
+       int msr_offset = MSR_HWP_REQUEST;
+
+       read_hwp_request(cpu, &req, msr_offset);
+       if (debug)
+               print_hwp_request(cpu, &req, "old: ");
+
+       if (update_hwp_min)
+               req.hwp_min = req_update.hwp_min;
+
+       if (update_hwp_max)
+               req.hwp_max = req_update.hwp_max;
+
+       if (update_hwp_desired)
+               req.hwp_desired = req_update.hwp_desired;
+
+       if (update_hwp_window)
+               req.hwp_window = req_update.hwp_window;
+
+       if (update_hwp_epp)
+               req.hwp_epp = req_update.hwp_epp;
+
+       req.hwp_use_pkg = req_update.hwp_use_pkg;
+
+       read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
+       if (debug)
+               print_hwp_cap(cpu, &cap, "");
+
+       if (!force)
+               check_hwp_request_v_hwp_capabilities(cpu, &req, &cap);
+
+       verify_hwp_req_self_consistency(cpu, &req);
 
-       sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
-       fd = open(msr_path, O_RDWR);
-       if (fd < 0) {
-               perror(msr_path);
-               exit(1);
+       write_hwp_request(cpu, &req, msr_offset);
+
+       if (debug) {
+               read_hwp_request(cpu, &req, msr_offset);
+               print_hwp_request(cpu, &req, "new: ");
        }
+       return 0;
+}
+int update_hwp_request_pkg(int pkg)
+{
+       struct msr_hwp_request req;
+       struct msr_hwp_cap cap;
+       int cpu = first_cpu_in_pkg[pkg];
+
+       int msr_offset = MSR_HWP_REQUEST_PKG;
+
+       read_hwp_request(cpu, &req, msr_offset);
+       if (debug)
+               print_hwp_request_pkg(pkg, &req, "old: ");
+
+       if (update_hwp_min)
+               req.hwp_min = req_update.hwp_min;
+
+       if (update_hwp_max)
+               req.hwp_max = req_update.hwp_max;
+
+       if (update_hwp_desired)
+               req.hwp_desired = req_update.hwp_desired;
+
+       if (update_hwp_window)
+               req.hwp_window = req_update.hwp_window;
+
+       if (update_hwp_epp)
+               req.hwp_epp = req_update.hwp_epp;
+
+       read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
+       if (debug)
+               print_hwp_cap(cpu, &cap, "");
+
+       if (!force)
+               check_hwp_request_v_hwp_capabilities(cpu, &req, &cap);
+
+       verify_hwp_req_self_consistency(cpu, &req);
+
+       write_hwp_request(cpu, &req, msr_offset);
 
-       retval = pread(fd, &old_msr, sizeof old_msr, offset);
-       if (retval != sizeof old_msr) {
-               perror("pwrite");
-               printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
-               exit(-2);
+       if (debug) {
+               read_hwp_request(cpu, &req, msr_offset);
+               print_hwp_request_pkg(pkg, &req, "new: ");
        }
+       return 0;
+}
+
+int enable_hwp_on_cpu(int cpu)
+{
+       unsigned long long msr;
+
+       get_msr(cpu, MSR_PM_ENABLE, &msr);
+       put_msr(cpu, MSR_PM_ENABLE, 1);
+
+       if (verbose)
+               printf("cpu%d: MSR_PM_ENABLE old: %d new: %d\n", cpu, (unsigned int) msr, 1);
+
+       return 0;
+}
+
+int update_cpu_msrs(int cpu)
+{
+       unsigned long long msr;
+
 
-       retval = pwrite(fd, &new_msr, sizeof new_msr, offset);
-       if (retval != sizeof new_msr) {
-               perror("pwrite");
-               printf("pwrite cpu%d 0x%x = %d\n", cpu, offset, retval);
-               exit(-2);
+       if (update_epb) {
+               get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr);
+               put_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, new_epb);
+
+               if (verbose)
+                       printf("cpu%d: ENERGY_PERF_BIAS old: %d new: %d\n",
+                               cpu, (unsigned int) msr, (unsigned int) new_epb);
        }
 
-       close(fd);
+       if (update_turbo) {
+               int turbo_is_present_and_disabled;
+
+               get_msr(cpu, MSR_IA32_MISC_ENABLE, &msr);
+
+               turbo_is_present_and_disabled = ((msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE) != 0);
+
+               if (turbo_update_value == 1)    {
+                       if (turbo_is_present_and_disabled) {
+                               msr &= ~MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+                               put_msr(cpu, MSR_IA32_MISC_ENABLE, msr);
+                               if (verbose)
+                                       printf("cpu%d: turbo ENABLE\n", cpu);
+                       }
+               } else {
+                       /*
+                        * if "turbo_is_enabled" were known to describe this cpu,
+                        * then we could use it here to skip redundant disable requests.
+                        * but cpu may be in a different package, so we always write.
+                        */
+                       msr |= MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+                       put_msr(cpu, MSR_IA32_MISC_ENABLE, msr);
+                       if (verbose)
+                               printf("cpu%d: turbo DISABLE\n", cpu);
+               }
+       }
+
+       if (!has_hwp)
+               return 0;
+
+       if (!hwp_update_enabled())
+               return 0;
+
+       update_hwp_request(cpu);
+       return 0;
+}
+
+/*
+ * Open a file, and exit on failure
+ */
+FILE *fopen_or_die(const char *path, const char *mode)
+{
+       FILE *filep = fopen(path, mode);
 
-       return old_msr;
+       if (!filep)
+               err(1, "%s: open failed", path);
+       return filep;
 }
 
-void print_msr(int cpu)
+unsigned int get_pkg_num(int cpu)
 {
-       printf("cpu%d: 0x%016llx\n",
-               cpu, get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS));
+       FILE *fp;
+       char pathname[128];
+       unsigned int pkg;
+       int retval;
+
+       sprintf(pathname, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
+
+       fp = fopen_or_die(pathname, "r");
+       retval = fscanf(fp, "%u\n", &pkg);
+       if (retval != 1)
+               errx(1, "%s: failed to parse", pathname);
+       fclose(fp);
+       return pkg;
 }
 
-void update_msr(int cpu)
+int set_max_cpu_pkg_num(int cpu)
 {
-       unsigned long long previous_msr;
+       unsigned int pkg;
 
-       previous_msr = put_msr(cpu, new_bias, MSR_IA32_ENERGY_PERF_BIAS);
+       if (max_cpu_num < cpu)
+               max_cpu_num = cpu;
 
-       if (verbose)
-               printf("cpu%d  msr0x%x 0x%016llx -> 0x%016llx\n",
-                       cpu, MSR_IA32_ENERGY_PERF_BIAS, previous_msr, new_bias);
+       pkg = get_pkg_num(cpu);
+
+       if (pkg >= MAX_PACKAGES)
+               errx(1, "cpu%d: %d >= MAX_PACKAGES (%d)", cpu, pkg, MAX_PACKAGES);
+
+       if (pkg > max_pkg_num)
+               max_pkg_num = pkg;
 
-       return;
+       if ((pkg_present_set & (1ULL << pkg)) == 0) {
+               pkg_present_set |= (1ULL << pkg);
+               first_cpu_in_pkg[pkg] = cpu;
+       }
+
+       return 0;
+}
+int mark_cpu_present(int cpu)
+{
+       CPU_SET_S(cpu, cpu_setsize, cpu_present_set);
+       return 0;
 }
 
-char *proc_stat = "/proc/stat";
 /*
- * run func() on every cpu in /dev/cpu
+ * run func(cpu) on every cpu in /proc/stat
+ * return max_cpu number
  */
-void for_every_cpu(void (func)(int))
+int for_all_proc_cpus(int (func)(int))
 {
        FILE *fp;
+       int cpu_num;
        int retval;
 
-       fp = fopen(proc_stat, "r");
-       if (fp == NULL) {
-               perror(proc_stat);
-               exit(1);
-       }
+       fp = fopen_or_die(proc_stat, "r");
 
        retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
-       if (retval != 0) {
-               perror("/proc/stat format");
-               exit(1);
-       }
+       if (retval != 0)
+               err(1, "%s: failed to parse format", proc_stat);
 
        while (1) {
-               int cpu;
-
-               retval = fscanf(fp,
-                       "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n",
-                       &cpu);
+               retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
                if (retval != 1)
                        break;
 
-               func(cpu);
+               retval = func(cpu_num);
+               if (retval) {
+                       fclose(fp);
+                       return retval;
+               }
        }
        fclose(fp);
+       return 0;
+}
+
+void for_all_cpus_in_set(size_t set_size, cpu_set_t *cpu_set, int (func)(int))
+{
+       int cpu_num;
+
+       for (cpu_num = 0; cpu_num <= max_cpu_num; ++cpu_num)
+               if (CPU_ISSET_S(cpu_num, set_size, cpu_set))
+                       func(cpu_num);
+}
+
+void init_data_structures(void)
+{
+       for_all_proc_cpus(set_max_cpu_pkg_num);
+
+       cpu_setsize = CPU_ALLOC_SIZE((max_cpu_num + 1));
+
+       cpu_present_set = CPU_ALLOC((max_cpu_num + 1));
+       if (cpu_present_set == NULL)
+               err(3, "CPU_ALLOC");
+       CPU_ZERO_S(cpu_setsize, cpu_present_set);
+       for_all_proc_cpus(mark_cpu_present);
+}
+
+/* clear has_hwp if it is not enable (or being enabled) */
+
+void verify_hwp_is_enabled(void)
+{
+       unsigned long long msr;
+
+       if (!has_hwp)   /* set in early_cpuid() */
+               return;
+
+       /* MSR_PM_ENABLE[1] == 1 if HWP is enabled and MSRs visible */
+       get_msr(base_cpu, MSR_PM_ENABLE, &msr);
+       if ((msr & 1) == 0) {
+               fprintf(stderr, "HWP can be enabled using '--hwp-enable'\n");
+               has_hwp = 0;
+               return;
+       }
+}
+
+int req_update_bounds_check(void)
+{
+       if (!hwp_update_enabled())
+               return 0;
+
+       /* fail if min > max requested */
+       if ((update_hwp_max && update_hwp_min) &&
+           (req_update.hwp_min > req_update.hwp_max)) {
+               printf("hwp-min %d > hwp_max %d\n", req_update.hwp_min, req_update.hwp_max);
+               return -EINVAL;
+       }
+
+       /* fail if desired > max requested */
+       if (req_update.hwp_desired && update_hwp_max &&
+           (req_update.hwp_desired > req_update.hwp_max)) {
+               printf("hwp-desired cannot be greater than hwp_max\n");
+               return -EINVAL;
+       }
+       /* fail if desired < min requested */
+       if (req_update.hwp_desired && update_hwp_min &&
+           (req_update.hwp_desired < req_update.hwp_min)) {
+               printf("hwp-desired cannot be less than hwp_min\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+void set_base_cpu(void)
+{
+       base_cpu = sched_getcpu();
+       if (base_cpu < 0)
+               err(-ENODEV, "No valid cpus found");
+}
+
+
+void probe_dev_msr(void)
+{
+       struct stat sb;
+       char pathname[32];
+
+       sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
+       if (stat(pathname, &sb))
+               if (system("/sbin/modprobe msr > /dev/null 2>&1"))
+                       err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
+}
+/*
+ * early_cpuid()
+ * initialize turbo_is_enabled, has_hwp, has_epb
+ * before cmdline is parsed
+ */
+void early_cpuid(void)
+{
+       unsigned int eax, ebx, ecx, edx, max_level;
+       unsigned int fms, family, model;
+
+       __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
+
+       if (max_level < 6)
+               errx(1, "Processor not supported\n");
+
+       __get_cpuid(1, &fms, &ebx, &ecx, &edx);
+       family = (fms >> 8) & 0xf;
+       model = (fms >> 4) & 0xf;
+       if (family == 6 || family == 0xf)
+               model += ((fms >> 16) & 0xf) << 4;
+
+       if (model == 0x4F) {
+               unsigned long long msr;
+
+               get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
+
+               bdx_highest_ratio = msr & 0xFF;
+       }
+
+       __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+       turbo_is_enabled = (eax >> 1) & 1;
+       has_hwp = (eax >> 7) & 1;
+       has_epb = (ecx >> 3) & 1;
+}
+
+/*
+ * parse_cpuid()
+ * set
+ * has_hwp, has_hwp_notify, has_hwp_activity_window, has_hwp_epp, has_hwp_request_pkg, has_epb
+ */
+void parse_cpuid(void)
+{
+       unsigned int eax, ebx, ecx, edx, max_level;
+       unsigned int fms, family, model, stepping;
+
+       eax = ebx = ecx = edx = 0;
+
+       __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
+
+       if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
+               genuine_intel = 1;
+
+       if (debug)
+               fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
+                       (char *)&ebx, (char *)&edx, (char *)&ecx);
+
+       __get_cpuid(1, &fms, &ebx, &ecx, &edx);
+       family = (fms >> 8) & 0xf;
+       model = (fms >> 4) & 0xf;
+       stepping = fms & 0xf;
+       if (family == 6 || family == 0xf)
+               model += ((fms >> 16) & 0xf) << 4;
+
+       if (debug) {
+               fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
+                       max_level, family, model, stepping, family, model, stepping);
+               fprintf(stderr, "CPUID(1): %s %s %s %s %s %s %s %s\n",
+                       ecx & (1 << 0) ? "SSE3" : "-",
+                       ecx & (1 << 3) ? "MONITOR" : "-",
+                       ecx & (1 << 7) ? "EIST" : "-",
+                       ecx & (1 << 8) ? "TM2" : "-",
+                       edx & (1 << 4) ? "TSC" : "-",
+                       edx & (1 << 5) ? "MSR" : "-",
+                       edx & (1 << 22) ? "ACPI-TM" : "-",
+                       edx & (1 << 29) ? "TM" : "-");
+       }
+
+       if (!(edx & (1 << 5)))
+               errx(1, "CPUID: no MSR");
+
+
+       __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+       /* turbo_is_enabled already set */
+       /* has_hwp already set */
+       has_hwp_notify = eax & (1 << 8);
+       has_hwp_activity_window = eax & (1 << 9);
+       has_hwp_epp = eax & (1 << 10);
+       has_hwp_request_pkg = eax & (1 << 11);
+
+       if (!has_hwp_request_pkg && update_hwp_use_pkg)
+               errx(1, "--hwp-use-pkg is not available on this hardware");
+
+       /* has_epb already set */
+
+       if (debug)
+               fprintf(stderr,
+                       "CPUID(6): %sTURBO, %sHWP, %sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n",
+                       turbo_is_enabled ? "" : "No-",
+                       has_hwp ? "" : "No-",
+                       has_hwp_notify ? "" : "No-",
+                       has_hwp_activity_window ? "" : "No-",
+                       has_hwp_epp ? "" : "No-",
+                       has_hwp_request_pkg ? "" : "No-",
+                       has_epb ? "" : "No-");
+
+       return; /* success */
 }
 
 int main(int argc, char **argv)
 {
+       set_base_cpu();
+       probe_dev_msr();
+       init_data_structures();
+
+       early_cpuid();  /* initial cpuid parse before cmdline */
+
        cmdline(argc, argv);
 
-       if (verbose > 1)
-               printf("x86_energy_perf_policy Nov 24, 2010"
-                               " - Len Brown <lenb@kernel.org>\n");
-       if (verbose > 1 && !read_only)
-               printf("new_bias %lld\n", new_bias);
-
-       validate_cpuid();
-
-       if (cpu != -1) {
-               if (read_only)
-                       print_msr(cpu);
-               else
-                       update_msr(cpu);
-       } else {
-               if (read_only)
-                       for_every_cpu(print_msr);
-               else
-                       for_every_cpu(update_msr);
+       if (debug)
+               print_version();
+
+       parse_cpuid();
+
+        /* If CPU-set and PKG-set are not initialized, default to all CPUs */
+       if ((cpu_selected_set == 0) && (pkg_selected_set == 0))
+               cpu_selected_set = cpu_present_set;
+
+       /*
+        * If HWP is being enabled, do it now, so that subsequent operations
+        * that access HWP registers can work.
+        */
+       if (update_hwp_enable)
+               for_all_cpus_in_set(cpu_setsize, cpu_selected_set, enable_hwp_on_cpu);
+
+       /* If HWP present, but disabled, warn and ignore from here forward */
+       verify_hwp_is_enabled();
+
+       if (req_update_bounds_check())
+               return -EINVAL;
+
+       /* display information only, no updates to settings */
+       if (!update_epb && !update_turbo && !hwp_update_enabled()) {
+               if (cpu_selected_set)
+                       for_all_cpus_in_set(cpu_setsize, cpu_selected_set, print_cpu_msrs);
+
+               if (has_hwp_request_pkg) {
+                       if (pkg_selected_set == 0)
+                               pkg_selected_set = pkg_present_set;
+
+                       for_packages(pkg_selected_set, print_pkg_msrs);
+               }
+
+               return 0;
        }
 
+       /* update CPU set */
+       if (cpu_selected_set) {
+               for_all_cpus_in_set(cpu_setsize, cpu_selected_set, update_sysfs);
+               for_all_cpus_in_set(cpu_setsize, cpu_selected_set, update_cpu_msrs);
+       } else if (pkg_selected_set)
+               for_packages(pkg_selected_set, update_hwp_request_pkg);
+
        return 0;
 }
index 64cae1a5deff956637a05b08c25565fe1718611b..e1f75a1914a15491b79ec95b0f18c6617565873b 100644 (file)
@@ -370,7 +370,7 @@ acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
 }
 EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
 
-union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
+union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
                u64 rev, u64 func, union acpi_object *argv4)
 {
        union acpi_object *obj = ERR_PTR(-ENXIO);
@@ -379,11 +379,11 @@ union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
        rcu_read_lock();
        ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
        if (ops)
-               obj = ops->evaluate_dsm(handle, uuid, rev, func, argv4);
+               obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
        rcu_read_unlock();
 
        if (IS_ERR(obj))
-               return acpi_evaluate_dsm(handle, uuid, rev, func, argv4);
+               return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
        return obj;
 }
 EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
index c2187178fb13335ce8c54c1787949e85871494aa..28859da78edfe7fbedfea914a2a5788e143f5861 100644 (file)
@@ -1559,7 +1559,7 @@ static unsigned long nfit_ctl_handle;
 union acpi_object *result;
 
 static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
-               const u8 *uuid, u64 rev, u64 func, union acpi_object *argv4)
+               const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4)
 {
        if (handle != &nfit_ctl_handle)
                return ERR_PTR(-ENXIO);
index f54c0032c6ff3a34e14504aa8fa5f219148267a3..d3d63dd5ed38e4d93d5d1edb4db6cc04fde9719f 100644 (file)
@@ -13,6 +13,7 @@
 #ifndef __NFIT_TEST_H__
 #define __NFIT_TEST_H__
 #include <linux/list.h>
+#include <linux/uuid.h>
 #include <linux/ioport.h>
 #include <linux/spinlock_types.h>
 
@@ -36,7 +37,8 @@ typedef void *acpi_handle;
 
 typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
 typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
-               const u8 *uuid, u64 rev, u64 func, union acpi_object *argv4);
+                const guid_t *guid, u64 rev, u64 func,
+                union acpi_object *argv4);
 void __iomem *__wrap_ioremap_nocache(resource_size_t offset,
                unsigned long size);
 void __wrap_iounmap(volatile void __iomem *addr);
index a676d3eefefbdd4b239d394a9b057f15c6a2cd7f..13f5198ba0ee737819b24cbc3538cc4b97e8d2f2 100755 (executable)
@@ -305,7 +305,7 @@ function perf_test()
        echo "Running remote perf test $WITH DMA"
        write_file "" $REMOTE_PERF/run
        echo -n "  "
-       read_file $LOCAL_PERF/run
+       read_file $REMOTE_PERF/run
        echo "  Passed"
 
        _modprobe -r ntb_perf