%ps versatile_init
%pB prev_fn_of_versatile_init+0x88/0x88
-For printing symbols and function pointers. The ``S`` and ``s`` specifiers
-result in the symbol name with (``S``) or without (``s``) offsets. Where
-this is used on a kernel without KALLSYMS - the symbol address is
-printed instead.
+The ``F`` and ``f`` specifiers are for printing function pointers,
+for example, f->func, &gettimeofday. They have the same result as
+the ``S`` and ``s`` specifiers, but they do an extra conversion on
+the ia64, ppc64 and parisc64 architectures, where function pointers
+are actually function descriptors.
+
+The ``S`` and ``s`` specifiers can be used for printing symbols
+from direct addresses, for example, __builtin_return_address(0) or
+(void *)regs->ip. They result in the symbol name with (``S``) or
+without (``s``) offsets. If KALLSYMS is disabled then the symbol
+address is printed instead.
The ``B`` specifier results in the symbol name with offsets and should be
used when printing stack backtraces. The specifier takes into
consideration the effect of compiler optimisations which may occur
when tail-calls are used and marked with the noreturn GCC attribute.
-On ia64, ppc64 and parisc64 architectures function pointers are
-actually function descriptors which must first be resolved. The ``F`` and
-``f`` specifiers perform this resolution and then provide the same
-functionality as the ``S`` and ``s`` specifiers.
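A quick usage sketch (illustrative only; ``my_handler``, ``regs`` and
``stack`` are assumed to exist in the caller and are not part of this
patch)::

	printk("registered %pF\n", my_handler);        /* function pointer */
	printk("faulted at %pS\n", (void *)regs->ip);  /* direct address, with offset */
	printk("%pB\n", (void *)*stack);               /* backtrace entry, tail-call aware */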
Kernel Pointers
===============
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
-T: git git://git.infradead.org/users/jcooper/linux.git irqchip/core
F: Documentation/devicetree/bindings/interrupt-controller/
F: drivers/irqchip/
VERSION = 4
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
NAME = Fearless Coyote
# *DOCUMENTATION*
#address-cells = <1>;
#size-cells = <1>;
status = "disabled";
+ ranges;
adc: adc@50030800 {
compatible = "fsl,imx25-gcq";
pinctrl_pcie: pciegrp {
fsl,pins = <
/* PCIe reset */
- MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x030b0
+ MX6QDL_PAD_EIM_DA0__GPIO3_IO00 0x030b0
MX6QDL_PAD_EIM_DA4__GPIO3_IO04 0x030b0
>;
};
&pcie {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pcie>;
- reset-gpio = <&gpio6 31 GPIO_ACTIVE_LOW>;
+ reset-gpio = <&gpio3 0 GPIO_ACTIVE_LOW>;
status = "okay";
};
>;
};
+ pinctrl_spi4: spi4grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x59
+ MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59
+ MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59
+ >;
+ };
+
pinctrl_tsc2046_pendown: tsc2046_pendown {
fsl,pins = <
MX7D_PAD_EPDC_BDR1__GPIO2_IO29 0x59
fsl,pins = <
MX7D_PAD_LPSR_GPIO1_IO01__PWM1_OUT 0x110b0
>;
-
- pinctrl_spi4: spi4grp {
- fsl,pins = <
- MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x59
- MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59
- MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59
- >;
- };
};
};
#size-cells = <1>;
atmel,smc = <&hsmc>;
reg = <0x10000000 0x10000000
- 0x40000000 0x30000000>;
+ 0x60000000 0x30000000>;
ranges = <0x0 0x0 0x10000000 0x10000000
0x1 0x0 0x60000000 0x10000000
0x2 0x0 0x70000000 0x10000000
};
hsmc: hsmc@f8014000 {
- compatible = "atmel,sama5d3-smc", "syscon", "simple-mfd";
+ compatible = "atmel,sama5d2-smc", "syscon", "simple-mfd";
reg = <0xf8014000 0x1000>;
- interrupts = <5 IRQ_TYPE_LEVEL_HIGH 6>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH 6>;
clocks = <&hsmc_clk>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
- pmecc: ecc-engine@ffffc070 {
+ pmecc: ecc-engine@f8014070 {
compatible = "atmel,sama5d2-pmecc";
- reg = <0xffffc070 0x490>,
- <0xffffc500 0x100>;
+ reg = <0xf8014070 0x490>,
+ <0xf8014500 0x100>;
};
};
compatible = "sinovoip,bananapi-m64", "allwinner,sun50i-a64";
aliases {
+ ethernet0 = &emac;
serial0 = &uart0;
serial1 = &uart1;
};
compatible = "pine64,pine64", "allwinner,sun50i-a64";
aliases {
+ ethernet0 = &emac;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
"allwinner,sun50i-a64";
aliases {
+ ethernet0 = &emac;
serial0 = &uart0;
};
};
&pio {
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
compatible = "allwinner,sun50i-h5-pinctrl";
};
stdout-path = "serial0:115200n8";
};
- audio_clkout: audio_clkout {
+ audio_clkout: audio-clkout {
/*
* This is same as <&rcar_sound 0>
* but needed to avoid cs2000/rcar_sound probe dead-lock
u64 _val; \
if (needs_unstable_timer_counter_workaround()) { \
const struct arch_timer_erratum_workaround *wa; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
wa = __this_cpu_read(timer_unstable_counter_workaround); \
if (wa && wa->read_##reg) \
_val = wa->read_##reg(); \
else \
_val = read_sysreg(reg); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
} else { \
_val = read_sysreg(reg); \
} \
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
-#define ELF_ET_DYN_BASE 0x100000000UL
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
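/* Worked example (not part of the patch): assuming the common 48-bit
 * VA_BITS configuration, TASK_SIZE_64 is 1UL << 48, so the new base is
 * 2 * (1UL << 48) / 3 == 0xaaaaaaaaaaaa (~170 TiB), still well above
 * the 4 GiB boundary the comment above refers to.
 */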
#ifndef __ASSEMBLY__
select HAVE_OPTPROBES if PPC64
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if PPC64
- select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE if SMP
cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
- if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+ if (current->thread.regs &&
+ (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
check_if_tm_restore_required(current);
/*
* If a thread has already been reclaimed then the
{
if (tsk->thread.regs) {
preempt_disable();
- if (tsk->thread.regs->msr & MSR_VSX) {
+ if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
BUG_ON(tsk != current);
giveup_vsx(tsk);
}
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
+ select HARDLOCKUP_CHECK_TIMESTAMP if X86_64
select HAVE_ACPI_APEI if ACPI
select HAVE_ACPI_APEI_NMI if ACPI
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI
- select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
.set T1, REG_T1
.endm
-#define K_BASE %r8
#define HASH_PTR %r9
+#define BLOCKS_CTR %r8
#define BUFFER_PTR %r10
#define BUFFER_PTR2 %r13
-#define BUFFER_END %r11
#define PRECALC_BUF %r14
#define WK_BUF %r15
* blended AVX2 and ALU instruction scheduling
* 1 vector iteration per 8 rounds
*/
- vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
+ vmovdqu (i * 2)(BUFFER_PTR), W_TMP
.elseif ((i & 7) == 1)
- vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
+ vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
WY_TMP, WY_TMP
.elseif ((i & 7) == 2)
vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
.elseif ((i & 7) == 4)
- vpaddd K_XMM(K_BASE), WY, WY_TMP
+ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
.elseif ((i & 7) == 7)
vmovdqu WY_TMP, PRECALC_WK(i&~7)
vpxor WY, WY_TMP, WY_TMP
.elseif ((i & 7) == 7)
vpxor WY_TMP2, WY_TMP, WY
- vpaddd K_XMM(K_BASE), WY, WY_TMP
+ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
vpsrld $30, WY, WY
vpor WY, WY_TMP, WY
.elseif ((i & 7) == 7)
- vpaddd K_XMM(K_BASE), WY, WY_TMP
+ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
.endm
+/* Add constant only if (%2 >= %3) condition is met (uses RTA as temp)
+ * %1 += (%2 >= %3) ? %4 : 0
+ */
+.macro ADD_IF_GE a, b, c, d
+ mov \a, RTA
+ add $\d, RTA
+ cmp $\c, \b
+ cmovge RTA, \a
+.endm
+
/*
* macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
*/
lea (2*4*80+32)(%rsp), WK_BUF
# Precalc WK for first 2 blocks
- PRECALC_OFFSET = 0
+ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
.set i, 0
.rept 160
PRECALC i
.set i, i + 1
.endr
- PRECALC_OFFSET = 128
+
+ /* Go to next block if needed */
+ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
+ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
xchg WK_BUF, PRECALC_BUF
.align 32
 * we use BLOCKS_CTR as the count of 64-byte blocks left to process,
 * it is decremented below and tested against zero
 */
- cmp K_BASE, BUFFER_PTR
- jne _begin
+ test BLOCKS_CTR, BLOCKS_CTR
+ jnz _begin
.align 32
jmp _end
.align 32
.set j, j+2
.endr
- add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */
- cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */
- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
-
+ /* Update Counter */
+ sub $1, BLOCKS_CTR
+ /* Move to the next block only if needed*/
+ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
/*
* rounds
* 60,62,64,66,68
UPDATE_HASH 12(HASH_PTR), D
UPDATE_HASH 16(HASH_PTR), E
- cmp K_BASE, BUFFER_PTR /* is current block the last one? */
- je _loop
+ test BLOCKS_CTR, BLOCKS_CTR
+ jz _loop
mov TB, B
.set j, j+2
.endr
- add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */
-
- cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one */
- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
+ /* update counter */
+ sub $1, BLOCKS_CTR
+ /* Move to the next block only if needed*/
+ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
jmp _loop3
_loop3:
avx2_zeroupper
- lea K_XMM_AR(%rip), K_BASE
-
+ /* Setup initial values */
mov CTX, HASH_PTR
mov BUF, BUFFER_PTR
- lea 64(BUF), BUFFER_PTR2
-
- shl $6, CNT /* mul by 64 */
- add BUF, CNT
- add $64, CNT
- mov CNT, BUFFER_END
- cmp BUFFER_END, BUFFER_PTR2
- cmovae K_BASE, BUFFER_PTR2
+ mov BUF, BUFFER_PTR2
+ mov CNT, BLOCKS_CTR
xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
static bool avx2_usable(void)
{
- if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+ if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
&& boot_cpu_has(X86_FEATURE_BMI1)
&& boot_cpu_has(X86_FEATURE_BMI2))
return true;
* other IST entries.
*/
+ ASM_CLAC
+
/* Use %rdx as our temp variable throughout */
pushq %rdx
load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
}
-static void x86_pmu_event_mapped(struct perf_event *event)
+static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return;
* For now, this can't happen because all callers hold mmap_sem
* for write. If this changes, we'll need a different solution.
*/
- lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+ lockdep_assert_held_exclusive(&mm->mmap_sem);
- if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
- on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+ if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
+ on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
}
-static void x86_pmu_event_unmapped(struct perf_event *event)
+static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
- if (!current->mm)
- return;
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return;
- if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
- on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+ if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
+ on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
}
static int x86_pmu_event_idx(struct perf_event *event)
struct bts_phys buf[0];
};
-struct pmu bts_pmu;
+static struct pmu bts_pmu;
static size_t buf_size(struct page *page)
{
* P4_CONFIG_ALIASABLE or bits for P4_PEBS_METRIC, they are
* either up to date automatically or not applicable at all.
*/
-struct p4_event_alias {
+static struct p4_event_alias {
u64 original;
u64 alternative;
} p4_event_aliases[] = {
.attrs = rapl_formats_attr,
};
-const struct attribute_group *rapl_attr_groups[] = {
+static const struct attribute_group *rapl_attr_groups[] = {
&rapl_pmu_attr_group,
&rapl_pmu_format_group,
&rapl_pmu_events_group,
NULL,
};
-static struct attribute_group uncore_pmu_attr_group = {
+static const struct attribute_group uncore_pmu_attr_group = {
.attrs = uncore_pmu_attrs,
};
NULL,
};
-static struct attribute_group nhmex_uncore_ubox_format_group = {
+static const struct attribute_group nhmex_uncore_ubox_format_group = {
.name = "format",
.attrs = nhmex_uncore_ubox_formats_attr,
};
NULL,
};
-static struct attribute_group nhmex_uncore_cbox_format_group = {
+static const struct attribute_group nhmex_uncore_cbox_format_group = {
.name = "format",
.attrs = nhmex_uncore_cbox_formats_attr,
};
NULL,
};
-static struct attribute_group nhmex_uncore_bbox_format_group = {
+static const struct attribute_group nhmex_uncore_bbox_format_group = {
.name = "format",
.attrs = nhmex_uncore_bbox_formats_attr,
};
NULL,
};
-static struct attribute_group nhmex_uncore_sbox_format_group = {
+static const struct attribute_group nhmex_uncore_sbox_format_group = {
.name = "format",
.attrs = nhmex_uncore_sbox_formats_attr,
};
NULL,
};
-static struct attribute_group nhmex_uncore_mbox_format_group = {
+static const struct attribute_group nhmex_uncore_mbox_format_group = {
.name = "format",
.attrs = nhmex_uncore_mbox_formats_attr,
};
NULL,
};
-static struct attribute_group nhmex_uncore_rbox_format_group = {
+static const struct attribute_group nhmex_uncore_rbox_format_group = {
.name = "format",
.attrs = nhmex_uncore_rbox_formats_attr,
};
NULL,
};
-static struct attribute_group snb_uncore_format_group = {
+static const struct attribute_group snb_uncore_format_group = {
.name = "format",
.attrs = snb_uncore_formats_attr,
};
NULL,
};
-static struct attribute_group snb_uncore_imc_format_group = {
+static const struct attribute_group snb_uncore_imc_format_group = {
.name = "format",
.attrs = snb_uncore_imc_formats_attr,
};
NULL,
};
-static struct attribute_group nhm_uncore_format_group = {
+static const struct attribute_group nhm_uncore_format_group = {
.name = "format",
.attrs = nhm_uncore_formats_attr,
};
{ /* end: all zeroes */ },
};
-static struct attribute_group snbep_uncore_format_group = {
+static const struct attribute_group snbep_uncore_format_group = {
.name = "format",
.attrs = snbep_uncore_formats_attr,
};
-static struct attribute_group snbep_uncore_ubox_format_group = {
+static const struct attribute_group snbep_uncore_ubox_format_group = {
.name = "format",
.attrs = snbep_uncore_ubox_formats_attr,
};
-static struct attribute_group snbep_uncore_cbox_format_group = {
+static const struct attribute_group snbep_uncore_cbox_format_group = {
.name = "format",
.attrs = snbep_uncore_cbox_formats_attr,
};
-static struct attribute_group snbep_uncore_pcu_format_group = {
+static const struct attribute_group snbep_uncore_pcu_format_group = {
.name = "format",
.attrs = snbep_uncore_pcu_formats_attr,
};
-static struct attribute_group snbep_uncore_qpi_format_group = {
+static const struct attribute_group snbep_uncore_qpi_format_group = {
.name = "format",
.attrs = snbep_uncore_qpi_formats_attr,
};
NULL,
};
-static struct attribute_group ivbep_uncore_format_group = {
+static const struct attribute_group ivbep_uncore_format_group = {
.name = "format",
.attrs = ivbep_uncore_formats_attr,
};
-static struct attribute_group ivbep_uncore_ubox_format_group = {
+static const struct attribute_group ivbep_uncore_ubox_format_group = {
.name = "format",
.attrs = ivbep_uncore_ubox_formats_attr,
};
-static struct attribute_group ivbep_uncore_cbox_format_group = {
+static const struct attribute_group ivbep_uncore_cbox_format_group = {
.name = "format",
.attrs = ivbep_uncore_cbox_formats_attr,
};
-static struct attribute_group ivbep_uncore_pcu_format_group = {
+static const struct attribute_group ivbep_uncore_pcu_format_group = {
.name = "format",
.attrs = ivbep_uncore_pcu_formats_attr,
};
-static struct attribute_group ivbep_uncore_qpi_format_group = {
+static const struct attribute_group ivbep_uncore_qpi_format_group = {
.name = "format",
.attrs = ivbep_uncore_qpi_formats_attr,
};
NULL,
};
-static struct attribute_group knl_uncore_ubox_format_group = {
+static const struct attribute_group knl_uncore_ubox_format_group = {
.name = "format",
.attrs = knl_uncore_ubox_formats_attr,
};
NULL,
};
-static struct attribute_group knl_uncore_cha_format_group = {
+static const struct attribute_group knl_uncore_cha_format_group = {
.name = "format",
.attrs = knl_uncore_cha_formats_attr,
};
NULL,
};
-static struct attribute_group knl_uncore_pcu_format_group = {
+static const struct attribute_group knl_uncore_pcu_format_group = {
.name = "format",
.attrs = knl_uncore_pcu_formats_attr,
};
NULL,
};
-static struct attribute_group knl_uncore_irp_format_group = {
+static const struct attribute_group knl_uncore_irp_format_group = {
.name = "format",
.attrs = knl_uncore_irp_formats_attr,
};
NULL,
};
-static struct attribute_group hswep_uncore_ubox_format_group = {
+static const struct attribute_group hswep_uncore_ubox_format_group = {
.name = "format",
.attrs = hswep_uncore_ubox_formats_attr,
};
NULL,
};
-static struct attribute_group hswep_uncore_cbox_format_group = {
+static const struct attribute_group hswep_uncore_cbox_format_group = {
.name = "format",
.attrs = hswep_uncore_cbox_formats_attr,
};
NULL,
};
-static struct attribute_group hswep_uncore_sbox_format_group = {
+static const struct attribute_group hswep_uncore_sbox_format_group = {
.name = "format",
.attrs = hswep_uncore_sbox_formats_attr,
};
NULL,
};
-static struct attribute_group skx_uncore_chabox_format_group = {
+static const struct attribute_group skx_uncore_chabox_format_group = {
.name = "format",
.attrs = skx_uncore_cha_formats_attr,
};
NULL,
};
-static struct attribute_group skx_uncore_iio_format_group = {
+static const struct attribute_group skx_uncore_iio_format_group = {
.name = "format",
.attrs = skx_uncore_iio_formats_attr,
};
NULL,
};
-static struct attribute_group skx_uncore_format_group = {
+static const struct attribute_group skx_uncore_format_group = {
.name = "format",
.attrs = skx_uncore_formats_attr,
};
NULL,
};
-static struct attribute_group skx_upi_uncore_format_group = {
+static const struct attribute_group skx_upi_uncore_format_group = {
.name = "format",
.attrs = skx_upi_uncore_formats_attr,
};
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
-#define X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE (15*32+15) /* Virtual VMLOAD VMSAVE */
+#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
- 0x100000000UL)
+ (TASK_SIZE / 3 * 2))
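/* Worked example (not part of the patch): with the usual 64-bit TASK_SIZE
 * of 0x00007ffffffff000, TASK_SIZE / 3 * 2 evaluates to 0x555555554aaa,
 * which lines up with the familiar 0x5555_5555_xxxx load addresses seen
 * for PIE executables when ASLR is disabled; 32-bit tasks keep the low
 * 4 MiB base.
 */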
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
struct aperfmperf_sample *s = this_cpu_ptr(&samples);
ktime_t now = ktime_get();
s64 time_delta = ktime_ms_delta(now, s->time);
+ unsigned long flags;
/* Don't bother re-computing within the cache threshold time. */
if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
return;
+ local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
+ local_irq_restore(flags);
aperf_delta = aperf - s->aperf;
mperf_delta = mperf - s->mperf;
NULL
};
-static struct attribute_group thermal_attr_group = {
+static const struct attribute_group thermal_attr_group = {
.attrs = thermal_throttle_attrs,
.name = "thermal_throttle"
};
NULL
};
-static struct attribute_group mc_attr_group = {
+static const struct attribute_group mc_attr_group = {
.attrs = mc_default_attrs,
.name = "microcode",
};
NULL
};
-static struct attribute_group cpu_root_microcode_group = {
+static const struct attribute_group cpu_root_microcode_group = {
.name = "microcode",
.attrs = cpu_root_microcode_attrs,
};
stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
}
+static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type)
+{
+ struct set_mtrr_data data = { .smp_reg = reg,
+ .smp_base = base,
+ .smp_size = size,
+ .smp_type = type
+ };
+
+ stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask);
+}
+
static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type)
{
/* Search for an empty MTRR */
i = mtrr_if->get_free_region(base, size, replace);
if (i >= 0) {
- set_mtrr(i, base, size, type);
+ set_mtrr_cpuslocked(i, base, size, type);
if (likely(replace < 0)) {
mtrr_usage_table[i] = 1;
} else {
if (increment)
mtrr_usage_table[i]++;
if (unlikely(replace != i)) {
- set_mtrr(replace, 0, 0, 0);
+ set_mtrr_cpuslocked(replace, 0, 0, 0);
mtrr_usage_table[replace] = 0;
}
}
goto out;
}
if (--mtrr_usage_table[reg] < 1)
- set_mtrr(reg, 0, 0, 0);
+ set_mtrr_cpuslocked(reg, 0, 0, 0);
error = reg;
out:
mutex_unlock(&mtrr_mutex);
pudval_t *pud;
pmdval_t *pmd, pmd_entry;
int i;
+ unsigned int *next_pgt_ptr;
/* Is the address too large? */
if (physaddr >> MAX_PHYSMEM_BITS)
* creates a bunch of nonsense entries but that is fine --
* it avoids problems around wraparound.
*/
-
- pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
- pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
+ next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
+ pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
+ pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
NULL,
};
-static struct attribute_group boot_params_attr_group = {
+static const struct attribute_group boot_params_attr_group = {
.attrs = boot_params_version_attrs,
.bin_attrs = boot_params_data_attrs,
};
NULL,
};
-static struct attribute_group setup_data_attr_group = {
+static const struct attribute_group setup_data_attr_group = {
.attrs = setup_data_type_attrs,
.bin_attrs = setup_data_data_attrs,
};
* Returns zero if CPU booted OK, else error code from
* ->wakeup_secondary_cpu.
*/
-static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
+static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
+ int *cpu0_nmi_registered)
{
volatile u32 *trampoline_status =
(volatile u32 *) __va(real_mode_header->trampoline_status);
unsigned long start_ip = real_mode_header->trampoline_start;
unsigned long boot_error = 0;
- int cpu0_nmi_registered = 0;
unsigned long timeout;
idle->thread.sp = (unsigned long)task_pt_regs(idle);
boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
else
boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
- &cpu0_nmi_registered);
+ cpu0_nmi_registered);
if (!boot_error) {
/*
*/
smpboot_restore_warm_reset_vector();
}
- /*
- * Clean up the nmi handler. Do this after the callin and callout sync
- * to avoid impact of possible long unregister time.
- */
- if (cpu0_nmi_registered)
- unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");
return boot_error;
}
int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int apicid = apic->cpu_present_to_apicid(cpu);
+ int cpu0_nmi_registered = 0;
unsigned long flags;
- int err;
+ int err, ret = 0;
WARN_ON(irqs_disabled());
common_cpu_up(cpu, tidle);
- err = do_boot_cpu(apicid, cpu, tidle);
+ err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
if (err) {
pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
- return -EIO;
+ ret = -EIO;
+ goto unreg_nmi;
}
/*
touch_nmi_watchdog();
}
- return 0;
+unreg_nmi:
+ /*
+ * Clean up the nmi handler. Do this after the callin and callout sync
+ * to avoid impact of possible long unregister time.
+ */
+ if (cpu0_nmi_registered)
+ unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");
+
+ return ret;
}
/**
if (vls) {
if (!npt_enabled ||
- !boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) ||
+ !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
!IS_ENABLED(CONFIG_X86_64)) {
vls = false;
} else {
static unsigned long stack_maxrandom_size(unsigned long task_size)
{
unsigned long max = 0;
- if ((current->flags & PF_RANDOMIZE) &&
- !(current->personality & ADDR_NO_RANDOMIZE)) {
+ if (current->flags & PF_RANDOMIZE) {
max = (-1UL) & __STACK_RND_MASK(task_size == tasksize_32bit());
max <<= PAGE_SHIFT;
}
static unsigned long arch_rnd(unsigned int rndbits)
{
+ if (!(current->flags & PF_RANDOMIZE))
+ return 0;
return (get_random_long() & ((1UL << rndbits) - 1)) << PAGE_SHIFT;
}
unsigned long arch_mmap_rnd(void)
{
- if (!(current->flags & PF_RANDOMIZE))
- return 0;
return arch_rnd(mmap_is_ia32() ? mmap32_rnd_bits : mmap64_rnd_bits);
}
static struct bau_operations ops __ro_after_init;
/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
-static int timeout_base_ns[] = {
+static const int timeout_base_ns[] = {
20,
160,
1280,
* set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
* Such a message must be ignored.
*/
-void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
+static void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
{
unsigned long mmr_image;
unsigned char swack_vec;
for (queue = 0; queue < set->nr_hw_queues; queue++) {
mask = pci_irq_get_affinity(pdev, queue);
if (!mask)
- return -EINVAL;
+ goto fallback;
for_each_cpu(cpu, mask)
set->mq_map[cpu] = queue;
}
return 0;
+
+fallback:
+ WARN_ON_ONCE(set->nr_hw_queues > 1);
+ for_each_possible_cpu(cpu)
+ set->mq_map[cpu] = 0;
+ return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
return ERR_PTR(ret);
rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+ blk_queue_exit(q);
if (!rq)
return ERR_PTR(-EWOULDBLOCK);
blk_mq_put_ctx(alloc_data.ctx);
- blk_queue_exit(q);
rq->__data_len = 0;
rq->__sector = (sector_t) -1;
alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+ blk_queue_exit(q);
if (!rq)
return ERR_PTR(-EWOULDBLOCK);
- blk_queue_exit(q);
-
return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
/*
* Get the bios in the request so we can re-queue them.
*/
- if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
- req_op(shadow[i].request) == REQ_OP_DISCARD ||
- req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
+ if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+ req_op(shadow[j].request) == REQ_OP_DISCARD ||
+ req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
shadow[j].request->cmd_flags & REQ_FUA) {
/*
* Flush operations don't contain bios, so
config CLKSRC_PISTACHIO
bool "Clocksource for Pistachio SoC" if COMPILE_TEST
- depends on HAS_IOMEM
+ depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
select TIMER_OF
help
Enables the clocksource for the Pistachio SoC.
* While unlikely, it's theoretically possible that none of the frames
 * in a timer expose the combination of features we want.
*/
- for (i = i; i < timer_count; i++) {
+ for (i = 0; i < timer_count; i++) {
timer = &timers[i];
frame = arch_timer_mem_find_best_frame(timer);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get irq\n");
- return -EINVAL;
+ return irq;
}
/* map memory, let base point to the STI instance */
if (IS_ERR(p->base))
return PTR_ERR(p->base);
- if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
- dev_name(&pdev->dev), p)) {
+ ret = devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+ dev_name(&pdev->dev), p);
+ if (ret) {
dev_err(&pdev->dev, "failed to request low IRQ\n");
- return -ENOENT;
+ return ret;
}
/* get hold of clock */
const char *name = of_base->name ? of_base->name : np->full_name;
of_base->base = of_io_request_and_map(np, of_base->index, name);
- if (!of_base->base) {
+ if (IS_ERR(of_base->base)) {
pr_err("Failed to iomap (%s)\n", name);
- return -ENXIO;
+ return PTR_ERR(of_base->base);
}
return 0;
static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
- return mul_ext_fp(cpu->sample.core_avg_perf,
- cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
+ return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
}
static inline int32_t get_avg_pstate(struct cpudata *cpu)
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
&crypt->icv_rev_aes);
if (unlikely(!req_ctx->hmac_virt))
- goto free_buf_src;
+ goto free_buf_dst;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, cryptlen, authsize, 0);
BUG_ON(qmgr_stat_overflow(SEND_QID));
return -EINPROGRESS;
-free_buf_src:
- free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dst:
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
crypt->ctl_flags = CTL_FLAG_UNUSED;
return -ENOMEM;
}
struct dma_fence *f = e->fence;
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ if (dma_fence_is_signaled(f)) {
+ hash_del(&e->node);
+ dma_fence_put(f);
+ kmem_cache_free(amdgpu_sync_slab, e);
+ continue;
+ }
if (ring && s_fence) {
/* For fences from the same ring it is sufficient
* when they are scheduled.
}
}
- if (dma_fence_is_signaled(f)) {
- hash_del(&e->node);
- dma_fence_put(f);
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
return f;
}
unmap_src:
i915_gem_object_unpin_map(obj);
put_obj:
- i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+ i915_gem_object_put(obj);
return ret;
}
sseu->slice_mask |= BIT(s);
- if (IS_GEN9_BC(dev_priv))
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
sseu->subslice_mask =
INTEL_INFO(dev_priv)->sseu.subslice_mask;
}
static bool
-needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *engine,
- struct i915_gem_context *to)
+needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine)
{
+ struct i915_gem_context *from = engine->legacy_active_context;
+
if (!ppgtt)
return false;
/* Always load the ppgtt on first use */
- if (!engine->legacy_active_context)
+ if (!from)
return true;
/* Same context without new entries, skip */
- if (engine->legacy_active_context == to &&
+ if ((!from->ppgtt || from->ppgtt == ppgtt) &&
!(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
if (skip_rcs_switch(ppgtt, engine, to))
return 0;
- if (needs_pd_load_pre(ppgtt, engine, to)) {
+ if (needs_pd_load_pre(ppgtt, engine)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting
struct i915_hw_ppgtt *ppgtt =
to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- if (needs_pd_load_pre(ppgtt, engine, to)) {
+ if (needs_pd_load_pre(ppgtt, engine)) {
int ret;
trace_switch_mm(engine, to);
ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
+ engine->legacy_active_context = to;
return 0;
}
goto err_unpin;
}
+ ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
+ if (ret)
+ goto err_unpin;
+
ret = req->engine->emit_bb_start(req,
so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
uint8_t aux_channel, ddc_pin;
/* Each DDI port can have more than one value on the "DVO Port" field,
- * so look for all the possible values for each port and abort if more
- * than one is found. */
+ * so look for all the possible values for each port.
+ */
int dvo_ports[][3] = {
{DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
{DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
};
- /* Find the child device to use, abort if more than one found. */
+ /*
+ * Find the first child device to reference the port, report if more
+ * than one found.
+ */
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
it = dev_priv->vbt.child_dev + i;
if (it->common.dvo_port == dvo_ports[port][j]) {
if (child) {
- DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
+ DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
port_name(port));
- return;
+ } else {
+ child = it;
}
- child = it;
}
}
}
if (dev_priv->vbt.edp.low_vswing) {
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
- return cnl_ddi_translations_dp_0_85V;
+ return cnl_ddi_translations_edp_0_85V;
} else if (voltage == VOLTAGE_INFO_0_95V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
return cnl_ddi_translations_edp_0_95V;
!gpu_reset_clobbers_display(dev_priv))
return;
+ /* We have a modeset vs reset deadlock, defensively unbreak it.
+ *
+ * FIXME: We can do a _lot_ better, this is just a first iteration.
+ */
+ i915_gem_set_wedged(dev_priv);
+ DRM_DEBUG_DRIVER("Wedging GPU to avoid deadlocks with pending modeset updates\n");
+
/*
* Need mode_config.mutex so that we don't
* trample ongoing ->detect() and whatnot.
struct intel_encoder *encoder = connector->encoder;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct mipi_dsi_device *dsi_device;
- u8 data;
+ u8 data = 0;
enum port port;
/* FIXME: Need to take care of 16 bit brightness level */
if (!gpio_desc) {
gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
- "panel", gpio_index,
+ NULL, gpio_index,
value ? GPIOD_OUT_LOW :
GPIOD_OUT_HIGH);
return ret;
}
+static u8 gtiir[] = {
+ [RCS] = 0,
+ [BCS] = 0,
+ [VCS] = 1,
+ [VCS2] = 1,
+ [VECS] = 3,
+};
+
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
- /* After a GPU reset, we may have requests to replay */
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
+
+ /*
+ * Clear any pending interrupt state.
+ *
+ * We do it twice out of paranoia that some of the IIR are double
+ * buffered, and if we only reset it once there may still be
+ * an interrupt pending.
+ */
+ I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+ GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
+ I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+ GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ /* After a GPU reset, we may have requests to replay */
submit = false;
for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
if (!port_isset(&port[n]))
};
/* Logical Rings */
-void intel_logical_ring_stop(struct intel_engine_cs *engine);
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int logical_render_ring_init(struct intel_engine_cs *engine);
int logical_xcs_ring_init(struct intel_engine_cs *engine);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!IS_GEN9(dev_priv)) {
- DRM_ERROR("LSPCON is supported on GEN9 only\n");
+ if (!HAS_LSPCON(dev_priv)) {
+ DRM_ERROR("LSPCON is not supported on this platform\n");
return false;
}
return;
}
+ ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
switch (ipu_plane->dp_flow) {
case IPU_DP_FLOW_SYNC_BG:
- ipu_dp_setup_channel(ipu_plane->dp,
- IPUV3_COLORSPACE_RGB,
- IPUV3_COLORSPACE_RGB);
+ ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB);
ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
case IPU_DP_FLOW_SYNC_FG:
- ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format);
ipu_dp_setup_channel(ipu_plane->dp, ics,
IPUV3_COLORSPACE_UNKNOWN);
/* Enable local alpha on partial plane */
#include "sun4i_framebuffer.h"
#include "sun4i_tcon.h"
+static void sun4i_drv_lastclose(struct drm_device *dev)
+{
+ struct sun4i_drv *drv = dev->dev_private;
+
+ drm_fbdev_cma_restore_mode(drv->fbdev);
+}
+
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
static struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
/* Generic Operations */
+ .lastclose = sun4i_drv_lastclose,
.fops = &sun4i_drv_fops,
.name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine",
config IMX_IPUV3_CORE
tristate "IPUv3 core support"
depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
+ depends on DRM || !DRM # if DRM=m, this can't be 'y'
select GENERIC_IRQ_CHIP
help
Choose this if you have a i.MX5/6 system and want to use the Image
}
up_read(&lists_rwsem);
- mutex_unlock(&device_mutex);
-
ib_device_unregister_rdmacg(device);
ib_device_unregister_sysfs(device);
+
+ mutex_unlock(&device_mutex);
+
ib_cache_cleanup_one(device);
ib_security_destroy_port_pkey_list(device);
kref_get(&file->ref);
mutex_unlock(&uverbs_dev->lists_mutex);
- ib_uverbs_event_handler(&file->event_handler, &event);
mutex_lock(&file->cleanup_mutex);
ucontext = file->ucontext;
* for example due to freeing the resources
* (e.g mmput).
*/
+ ib_uverbs_event_handler(&file->event_handler, &event);
ib_dev->disassociate_ucontext(ucontext);
mutex_lock(&file->cleanup_mutex);
ib_uverbs_cleanup_ucontext(file, ucontext, true);
rhp = php->rhp;
if (mr_type != IB_MR_TYPE_MEM_REG ||
- max_num_sg > t4_max_fr_depth(&rhp->rdev.lldi.ulptx_memwrite_dsgl &&
+ max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
use_dsgl))
return ERR_PTR(-EINVAL);
} else {
u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);
- if (!dmac)
+ if (!dmac) {
+ kfree(ah);
return ERR_PTR(-EINVAL);
+ }
memcpy(ah->av.mac, dmac, ETH_ALEN);
}
u64 base = 0;
u32 i, j;
u32 k = 0;
- u32 low;
/* copy base values in obj_info */
- for (i = I40IW_HMC_IW_QP, j = 0;
- i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+ for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+ if ((i == I40IW_HMC_IW_SRQ) ||
+ (i == I40IW_HMC_IW_FSIMC) ||
+ (i == I40IW_HMC_IW_FSIAV)) {
+ info[i].base = 0;
+ info[i].cnt = 0;
+ continue;
+ }
get_64bit_val(buf, j, &temp);
info[i].base = RS_64_1(temp, 32) * 512;
if (info[i].base > base) {
base = info[i].base;
k = i;
}
- low = (u32)(temp);
- if (low)
- info[i].cnt = low;
+ if (i == I40IW_HMC_IW_APBVT_ENTRY) {
+ info[i].cnt = 1;
+ continue;
+ }
+ if (i == I40IW_HMC_IW_QP)
+ info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+ else if (i == I40IW_HMC_IW_CQ)
+ info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+ else
+ info[i].cnt = (u32)(temp);
}
size = info[k].cnt * info[k].size + info[k].base;
if (size & 0x1FFFFF)
return 0;
}
+/**
+ * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
+ * @buf: ptr to fpm query buffer
+ * @buf_idx: index into buf
+ * @info: ptr to i40iw_hmc_obj_info struct
+ * @rsrc_idx: resource index into info
+ *
+ * Decode a 64 bit value from fpm query buffer into max count and size
+ */
+static u64 i40iw_sc_decode_fpm_query(u64 *buf,
+ u32 buf_idx,
+ struct i40iw_hmc_obj_info *obj_info,
+ u32 rsrc_idx)
+{
+ u64 temp;
+ u32 size;
+
+ get_64bit_val(buf, buf_idx, &temp);
+ obj_info[rsrc_idx].max_cnt = (u32)temp;
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[rsrc_idx].size = LS_64_1(1, size);
+
+ return temp;
+}
+
/**
* i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
* @buf: ptr to fpm query buffer
struct i40iw_hmc_info *hmc_info,
struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
{
- u64 temp;
struct i40iw_hmc_obj_info *obj_info;
- u32 i, j, size;
+ u64 temp;
+ u32 size;
u16 max_pe_sds;
obj_info = hmc_info->hmc_obj;
hmc_fpm_misc->max_sds = max_pe_sds;
hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
- for (i = I40IW_HMC_IW_QP, j = 8;
- i <= I40IW_HMC_IW_ARP; i++, j += 8) {
- get_64bit_val(buf, j, &temp);
- if (i == I40IW_HMC_IW_QP)
- obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
- else if (i == I40IW_HMC_IW_CQ)
- obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
- else
- obj_info[i].max_cnt = (u32)temp;
+ get_64bit_val(buf, 8, &temp);
+ obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);
- size = (u32)RS_64_1(temp, 32);
- obj_info[i].size = ((u64)1 << size);
- }
- for (i = I40IW_HMC_IW_MR, j = 48;
- i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
- get_64bit_val(buf, j, &temp);
- obj_info[i].max_cnt = (u32)temp;
- size = (u32)RS_64_1(temp, 32);
- obj_info[i].size = LS_64_1(1, size);
- }
+ get_64bit_val(buf, 16, &temp);
+ obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);
+
+ i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
+ i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);
+
+ obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
+ obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+ i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
+ i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);
- get_64bit_val(buf, 120, &temp);
- hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
- get_64bit_val(buf, 120, &temp);
- hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
- get_64bit_val(buf, 120, &temp);
- hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
get_64bit_val(buf, 64, &temp);
+ obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
+ obj_info[I40IW_HMC_IW_XFFL].size = 4;
hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
if (!hmc_fpm_misc->xf_block_size)
return I40IW_ERR_INVALID_SIZE;
+
+ i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);
+
get_64bit_val(buf, 80, &temp);
+ obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
+ obj_info[I40IW_HMC_IW_Q1FL].size = 4;
hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
if (!hmc_fpm_misc->q1_block_size)
return I40IW_ERR_INVALID_SIZE;
+
+ i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);
+
+ get_64bit_val(buf, 112, &temp);
+ obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
+ obj_info[I40IW_HMC_IW_PBLE].size = 8;
+
+ get_64bit_val(buf, 120, &temp);
+ hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
+ hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
+ hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+
return 0;
}
hmc_info->sd_table.sd_entry = virt_mem.va;
}
- /* fill size of objects which are fixed */
- hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
- hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
- hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
- hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
- hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
-
return ret_code;
}
{
u8 fcn_id = vsi->fcn_id;
- if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
+ if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
vsi->dev->fcn_id_array[fcn_id] = false;
i40iw_hw_stats_stop_timer(vsi);
}
I40IW_CQ0_ALIGNMENT_MASK = (256 - 1),
I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1),
I40IW_SHADOWAREA_MASK = (128 - 1),
- I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = 0,
- I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = 0
+ I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = (4 - 1),
+ I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = (4 - 1)
};
enum i40iw_alignment {
cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
- I40IW_CQ0_ALIGNMENT_MASK);
+ I40IW_CQ0_ALIGNMENT);
if (ret)
return ret;
I40IW_ERR_INVALID_ALIGNMENT = -23,
I40IW_ERR_FLUSHED_QUEUE = -24,
I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,
- I40IW_ERR_INVALID_IMM_DATA_SIZE = -26,
+ I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,
I40IW_ERR_TIMEOUT = -27,
I40IW_ERR_OPCODE_MISMATCH = -28,
I40IW_ERR_CQP_COMPL_ERROR = -29,
op_info = &info->op.inline_rdma_write;
if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
- return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+ return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
if (ret_code)
op_info = &info->op.inline_send;
if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
- return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+ return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
if (ret_code)
get_64bit_val(cqe, 0, &qword0);
get_64bit_val(cqe, 16, &qword2);
- info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);
+ info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);
info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
u8 *wqe_size)
{
if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
- return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+ return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
if (data_size <= 16)
*wqe_size = I40IW_QP_WQE_MIN_SIZE;
struct pvrdma_dev *dev = to_vdev(ibcq->device);
struct pvrdma_cq *cq = to_vcq(ibcq);
u32 val = cq->cq_handle;
+ unsigned long flags;
+ int has_data = 0;
val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;
+ spin_lock_irqsave(&cq->cq_lock, flags);
+
pvrdma_write_uar_cq(dev, val);
- return 0;
+ if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+ unsigned int head;
+
+ has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+ cq->ibcq.cqe, &head);
+ if (unlikely(has_data == PVRDMA_INVALID_IDX))
+ dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
+ }
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ return has_data;
}
/**
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
{ "ELAN0605", 0 },
+ { "ELAN0608", 0 },
+ { "ELAN0605", 0 },
+ { "ELAN0609", 0 },
+ { "ELAN060B", 0 },
{ "ELAN1000", 0 },
{ }
};
return 0;
if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) {
- psmouse_warn(psmouse, "failed to get extended button data\n");
- button_info = 0;
+ psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
}
psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
#define AT91_RTC_IMR 0x28
#define AT91_RTC_IRQ_MASK 0x1f
-void __init aic_common_rtc_irq_fixup(struct device_node *root)
+void __init aic_common_rtc_irq_fixup(void)
{
struct device_node *np;
void __iomem *regs;
- np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
+ np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
if (!np)
- np = of_find_compatible_node(root, NULL,
+ np = of_find_compatible_node(NULL, NULL,
"atmel,at91sam9x5-rtc");
if (!np)
#define AT91_RTT_ALMIEN (1 << 16) /* Alarm Interrupt Enable */
#define AT91_RTT_RTTINCIEN (1 << 17) /* Real Time Timer Increment Interrupt Enable */
-void __init aic_common_rtt_irq_fixup(struct device_node *root)
+void __init aic_common_rtt_irq_fixup(void)
{
struct device_node *np;
void __iomem *regs;
return;
match = of_match_node(matches, root);
- of_node_put(root);
if (match) {
- void (*fixup)(struct device_node *) = match->data;
- fixup(root);
+ void (*fixup)(void) = match->data;
+ fixup();
}
of_node_put(root);
const char *name, int nirqs,
const struct of_device_id *matches);
-void __init aic_common_rtc_irq_fixup(struct device_node *root);
+void __init aic_common_rtc_irq_fixup(void);
-void __init aic_common_rtt_irq_fixup(struct device_node *root);
+void __init aic_common_rtt_irq_fixup(void);
#endif /* __IRQ_ATMEL_AIC_COMMON_H */
.xlate = aic_irq_domain_xlate,
};
-static void __init at91rm9200_aic_irq_fixup(struct device_node *root)
+static void __init at91rm9200_aic_irq_fixup(void)
{
- aic_common_rtc_irq_fixup(root);
+ aic_common_rtc_irq_fixup();
}
-static void __init at91sam9260_aic_irq_fixup(struct device_node *root)
+static void __init at91sam9260_aic_irq_fixup(void)
{
- aic_common_rtt_irq_fixup(root);
+ aic_common_rtt_irq_fixup();
}
-static void __init at91sam9g45_aic_irq_fixup(struct device_node *root)
+static void __init at91sam9g45_aic_irq_fixup(void)
{
- aic_common_rtc_irq_fixup(root);
- aic_common_rtt_irq_fixup(root);
+ aic_common_rtc_irq_fixup();
+ aic_common_rtt_irq_fixup();
}
static const struct of_device_id aic_irq_fixups[] __initconst = {
.xlate = aic5_irq_domain_xlate,
};
-static void __init sama5d3_aic_irq_fixup(struct device_node *root)
+static void __init sama5d3_aic_irq_fixup(void)
{
- aic_common_rtc_irq_fixup(root);
+ aic_common_rtc_irq_fixup();
}
static const struct of_device_id aic5_irq_fixups[] __initconst = {
ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
ct->chip.irq_resume = brcmstb_l2_intc_resume;
+ ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend;
if (data->can_wake) {
/* This IRQ chip can wake the system, set all child interrupts
*dev_id = args.args[0];
break;
}
+ index++;
} while (!ret);
return ret;
#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
-#if defined(CONFIG_ACPI_NUMA) && (ACPI_CA_VERSION >= 0x20170531)
+#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
/* numa node id */
u32 numa_node;
u32 its_id;
};
-static struct its_srat_map its_srat_maps[MAX_NUMNODES] __initdata;
+static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;
static int __init acpi_get_its_numa_node(u32 its_id)
return NUMA_NO_NODE;
}
+static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ return 0;
+}
+
static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
const unsigned long end)
{
return -EINVAL;
}
- if (its_in_srat >= MAX_NUMNODES) {
- pr_err("SRAT: ITS affinity exceeding max count[%d]\n",
- MAX_NUMNODES);
- return -EINVAL;
- }
-
node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
static void __init acpi_table_parse_srat_its(void)
{
+ int count;
+
+ count = acpi_table_parse_entries(ACPI_SIG_SRAT,
+ sizeof(struct acpi_table_srat),
+ ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
+ gic_acpi_match_srat_its, 0);
+ if (count <= 0)
+ return;
+
+ its_srat_maps = kmalloc(count * sizeof(struct its_srat_map),
+ GFP_KERNEL);
+ if (!its_srat_maps) {
+ pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
+ return;
+ }
+
acpi_table_parse_entries(ACPI_SIG_SRAT,
sizeof(struct acpi_table_srat),
ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
gic_acpi_parse_srat_its, 0);
}
+
+/* free the its_srat_maps after ITS probing */
+static void __init acpi_its_srat_maps_free(void)
+{
+ kfree(its_srat_maps);
+}
#else
static void __init acpi_table_parse_srat_its(void) { }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
+static void __init acpi_its_srat_maps_free(void) { }
#endif
static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
acpi_table_parse_srat_its();
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
gic_acpi_parse_madt_its, 0);
+ acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
if (static_key_true(&supports_deactivate))
gic_write_eoir(irqnr);
+ else
+ isb();
err = handle_domain_irq(gic_data.domain, irqnr, regs);
if (err) {
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ unsigned int cpu;
void __iomem *reg;
int enabled;
u64 val;
+ if (force)
+ cpu = cpumask_first(mask_val);
+ else
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
if (cpu >= nr_cpu_ids)
return -EINVAL;
if (ret)
return ret;
- for (i = 0; i < nr_irqs; i++)
- gic_irq_domain_map(domain, virq + i, hwirq + i);
+ for (i = 0; i < nr_irqs; i++) {
+ ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+ if (ret)
+ return ret;
+ }
return 0;
}
if (likely(irqnr > 15 && irqnr < 1020)) {
if (static_key_true(&supports_deactivate))
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+ isb();
handle_domain_irq(gic->domain, irqnr, regs);
continue;
}
goto out;
cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
- if (unlikely(gic_irq < 32 || gic_irq > 1020))
+ if (unlikely(gic_irq < 32 || gic_irq > 1020)) {
handle_bad_irq(desc);
- else
+ } else {
+ isb();
generic_handle_irq(cascade_irq);
+ }
out:
chained_irq_exit(chip, desc);
if (ret)
return ret;
- for (i = 0; i < nr_irqs; i++)
- gic_irq_domain_map(domain, virq + i, hwirq + i);
+ for (i = 0; i < nr_irqs; i++) {
+ ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+ if (ret)
+ return ret;
+ }
return 0;
}
#define FSM_TIMER_DEBUG 0
-void
+int
mISDN_FsmNew(struct Fsm *fsm,
struct FsmNode *fnlist, int fncount)
{
fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count *
fsm->event_count, GFP_KERNEL);
+ if (fsm->jumpmatrix == NULL)
+ return -ENOMEM;
for (i = 0; i < fncount; i++)
if ((fnlist[i].state >= fsm->state_count) ||
} else
fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
fnlist[i].state] = (FSMFNPTR) fnlist[i].routine;
+ return 0;
}
EXPORT_SYMBOL(mISDN_FsmNew);
void *arg;
};
-extern void mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
+extern int mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
extern void mISDN_FsmFree(struct Fsm *);
extern int mISDN_FsmEvent(struct FsmInst *, int , void *);
extern void mISDN_FsmChangeState(struct FsmInst *, int);
l1fsm_s.event_count = L1_EVENT_COUNT;
l1fsm_s.strEvent = strL1Event;
l1fsm_s.strState = strL1SState;
- mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
- return 0;
+ return mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
}
void
int
Isdnl2_Init(u_int *deb)
{
+ int res;
debug = deb;
mISDN_register_Bprotocol(&X75SLP);
l2fsm.state_count = L2_STATE_COUNT;
l2fsm.event_count = L2_EVENT_COUNT;
l2fsm.strEvent = strL2Event;
l2fsm.strState = strL2State;
- mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
- TEIInit(deb);
+ res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
+ if (res)
+ goto error;
+ res = TEIInit(deb);
+ if (res)
+ goto error_fsm;
return 0;
+
+error_fsm:
+ mISDN_FsmFree(&l2fsm);
+error:
+ mISDN_unregister_Bprotocol(&X75SLP);
+ return res;
}
void
int TEIInit(u_int *deb)
{
+ int res;
debug = deb;
teifsmu.state_count = TEI_STATE_COUNT;
teifsmu.event_count = TEI_EVENT_COUNT;
teifsmu.strEvent = strTeiEvent;
teifsmu.strState = strTeiState;
- mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+ res = mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+ if (res)
+ goto error;
teifsmn.state_count = TEI_STATE_COUNT;
teifsmn.event_count = TEI_EVENT_COUNT;
teifsmn.strEvent = strTeiEvent;
teifsmn.strState = strTeiState;
- mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+ res = mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+ if (res)
+ goto error_smn;
deactfsm.state_count = DEACT_STATE_COUNT;
deactfsm.event_count = DEACT_EVENT_COUNT;
deactfsm.strEvent = strDeactEvent;
deactfsm.strState = strDeactState;
- mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+ res = mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+ if (res)
+ goto error_deact;
return 0;
+
+error_deact:
+ mISDN_FsmFree(&teifsmn);
+error_smn:
+ mISDN_FsmFree(&teifsmu);
+error:
+ return res;
}
void TEIFree(void)
if (mddev->safemode == 1)
mddev->safemode = 0;
/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
- if (mddev->in_sync || !mddev->sync_checkers) {
+ if (mddev->in_sync || mddev->sync_checkers) {
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
if (mddev_trylock(mddev)) {
int spares = 0;
+ if (!mddev->external && mddev->safemode == 1)
+ mddev->safemode = 0;
+
if (mddev->ro) {
struct md_rdev *rdev;
if (!mddev->external && mddev->in_sync)
bool need_split_bio;
struct bio *split_bio;
- unsigned int has_flush:1; /* include flush request */
- unsigned int has_fua:1; /* include fua request */
- unsigned int has_null_flush:1; /* include empty flush request */
+ unsigned int has_flush:1; /* include flush request */
+ unsigned int has_fua:1; /* include fua request */
+ unsigned int has_null_flush:1; /* include null flush request */
+ unsigned int has_flush_payload:1; /* include flush payload */
/*
 * io isn't sent yet, a flush/fua request can only be added while it's
 * the first IO in the running_ios list
struct r5l_io_unit *io_deferred;
struct r5l_log *log = io->log;
unsigned long flags;
+ bool has_null_flush;
+ bool has_flush_payload;
if (bio->bi_status)
md_error(log->rdev->mddev, log->rdev);
spin_lock_irqsave(&log->io_list_lock, flags);
__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
+
+ /*
+ * The io unit may be freed once io_list_lock is released (its last
+ * pending_stripe can be dropped on another CPU), so it is not safe
+ * to dereference it afterwards. Therefore, read has_null_flush and
+ * has_flush_payload while the lock is still held.
+ */
+ has_null_flush = io->has_null_flush;
+ has_flush_payload = io->has_flush_payload;
+
if (log->need_cache_flush && !list_empty(&io->stripe_list))
r5l_move_to_end_ios(log);
else
if (log->need_cache_flush)
md_wakeup_thread(log->rdev->mddev->thread);
- if (io->has_null_flush) {
+ /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
+ if (has_null_flush) {
struct bio *bi;
WARN_ON(bio_list_empty(&io->flush_barriers));
while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
bio_endio(bi);
- atomic_dec(&io->pending_stripe);
+ if (atomic_dec_and_test(&io->pending_stripe)) {
+ __r5l_stripe_write_finished(io);
+ return;
+ }
}
}
-
- /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
- if (atomic_read(&io->pending_stripe) == 0)
- __r5l_stripe_write_finished(io);
+ /* decrease pending_stripe for flush payload */
+ if (has_flush_payload)
+ if (atomic_dec_and_test(&io->pending_stripe))
+ __r5l_stripe_write_finished(io);
}
static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
payload->size = cpu_to_le32(sizeof(__le64));
payload->flush_stripes[0] = cpu_to_le64(sect);
io->meta_offset += meta_size;
+ /* multiple flush payloads count as one pending_stripe */
+ if (!io->has_flush_payload) {
+ io->has_flush_payload = 1;
+ atomic_inc(&io->pending_stripe);
+ }
mutex_unlock(&log->io_mutex);
}
*/
int r5c_journal_mode_set(struct mddev *mddev, int mode)
{
- struct r5conf *conf = mddev->private;
- struct r5l_log *log = conf->log;
-
- if (!log)
- return -ENODEV;
+ struct r5conf *conf;
+ int err;
if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
mode > R5C_JOURNAL_MODE_WRITE_BACK)
return -EINVAL;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ conf = mddev->private;
+ if (!conf || !conf->log) {
+ mddev_unlock(mddev);
+ return -ENODEV;
+ }
+
if (raid5_calc_degraded(conf) > 0 &&
- mode == R5C_JOURNAL_MODE_WRITE_BACK)
+ mode == R5C_JOURNAL_MODE_WRITE_BACK) {
+ mddev_unlock(mddev);
return -EINVAL;
+ }
mddev_suspend(mddev);
conf->log->r5c_journal_mode = mode;
mddev_resume(mddev);
+ mddev_unlock(mddev);
pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
mdname(mddev), mode, r5c_journal_mode_str[mode]);
new_slave->delay = 0;
new_slave->link_failure_count = 0;
- if (bond_update_speed_duplex(new_slave))
+ if (bond_update_speed_duplex(new_slave) &&
+ bond_needs_speed_duplex(bond))
new_slave->link = BOND_LINK_DOWN;
new_slave->last_rx = jiffies -
continue;
case BOND_LINK_UP:
- if (bond_update_speed_duplex(slave)) {
+ if (bond_update_speed_duplex(slave) &&
+ bond_needs_speed_duplex(bond)) {
slave->link = BOND_LINK_DOWN;
- netdev_warn(bond->dev,
- "failed to get link speed/duplex for %s\n",
- slave->dev->name);
+ if (net_ratelimit())
+ netdev_warn(bond->dev,
+ "failed to get link speed/duplex for %s\n",
+ slave->dev->name);
continue;
}
bond_set_slave_link_state(slave, BOND_LINK_UP,
USING_SOFT_PARAMS = (1 << 6),
MASTER_PF = (1 << 7),
FW_OFLD_CONN = (1 << 9),
+ ROOT_NO_RELAXED_ORDERING = (1 << 10),
};
enum {
dev->name, adap->params.vpd.id, adap->name, buf);
}
-static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
-{
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
-}
-
/*
* Free the following resources:
* - memory used for tables
}
pci_enable_pcie_error_reporting(pdev);
- enable_pcie_relaxed_ordering(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
adapter->msg_enable = DFLT_MSG_ENABLE;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
+ /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
+ * Ingress Packet Data to Free List Buffers in order to allow for
+ * chipset performance optimizations between the Root Complex and
+ * Memory Controllers. (Messages to the associated Ingress Queue
+	 * notifying new Packet Placement in the Free List Buffers will be
+	 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
+ * all preceding PCIe Transaction Layer Packets will be processed
+ * first.) But some Root Complexes have various issues with Upstream
+ * Transaction Layer Packets with the Relaxed Ordering Attribute set.
+	 * PCIe devices under such Root Complexes will have the Relaxed Ordering
+	 * bit cleared in their configuration space, so we check our PCIe
+	 * configuration space to see if it's flagged with advice against
+ * using Relaxed Ordering.
+ */
+ if (!pcie_relaxed_ordering_enabled(pdev))
+ adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
spin_lock_init(&adapter->win0_lock);
struct fw_iq_cmd c;
struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
+ int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
/* Size needs to be multiple of 16, including status entry. */
iq->size = roundup(iq->size, 16);
flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
- FW_IQ_CMD_FL0FETCHRO_F |
- FW_IQ_CMD_FL0DATARO_F |
+ FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+ FW_IQ_CMD_FL0DATARO_V(relaxed) |
FW_IQ_CMD_FL0PADEN_F);
if (cong >= 0)
c.iqns_to_fl0congen |=
USING_MSI = (1UL << 1),
USING_MSIX = (1UL << 2),
QUEUES_BOUND = (1UL << 3),
+ ROOT_NO_RELAXED_ORDERING = (1UL << 4),
};
/*
*/
adapter->name = pci_name(pdev);
adapter->msg_enable = DFLT_MSG_ENABLE;
+
+ /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
+ * Ingress Packet Data to Free List Buffers in order to allow for
+ * chipset performance optimizations between the Root Complex and
+ * Memory Controllers. (Messages to the associated Ingress Queue
+	 * notifying new Packet Placement in the Free List Buffers will be
+	 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
+ * all preceding PCIe Transaction Layer Packets will be processed
+ * first.) But some Root Complexes have various issues with Upstream
+ * Transaction Layer Packets with the Relaxed Ordering Attribute set.
+	 * PCIe devices under such Root Complexes will have the Relaxed Ordering
+	 * bit cleared in their configuration space, so we check our PCIe
+	 * configuration space to see if it's flagged with advice against
+ * using Relaxed Ordering.
+ */
+ if (!pcie_relaxed_ordering_enabled(pdev))
+ adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+
err = adap_init0(adapter);
if (err)
goto err_unmap_bar;
struct port_info *pi = netdev_priv(dev);
struct fw_iq_cmd cmd, rpl;
int ret, iqandst, flsz = 0;
+ int relaxed = !(adapter->flags & ROOT_NO_RELAXED_ORDERING);
/*
* If we're using MSI interrupts and we're not initializing the
cpu_to_be32(
FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
FW_IQ_CMD_FL0PACKEN_F |
+ FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+ FW_IQ_CMD_FL0DATARO_V(relaxed) |
FW_IQ_CMD_FL0PADEN_F);
/* In T6, for egress queue type FL there is internal overhead
return;
}
- if (link) {
+ if (link)
netif_carrier_on(netdev);
- rtnl_lock();
- dev_set_mtu(netdev, be16_to_cpu(msg->mtu));
- rtnl_unlock();
- } else {
+ else
netif_carrier_off(netdev);
- }
rcu_read_unlock();
}
static int efx_mcdi_mac_stats(struct efx_nic *efx,
enum efx_stats_action action, int clear)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
int change = action == EFX_STATS_PULL ? 0 : 1;
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+ }
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
struct stmmac_priv *priv = netdev_priv(ndev);
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct device_node *mdio_node = priv->plat->mdio_node;
+ struct device *dev = ndev->dev.parent;
int addr, found;
if (!mdio_bus_data)
else
err = mdiobus_register(new_bus);
if (err != 0) {
- netdev_err(ndev, "Cannot register the MDIO bus\n");
+ dev_err(dev, "Cannot register the MDIO bus\n");
goto bus_register_fail;
}
irq_str = irq_num;
break;
}
- netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
- phydev->phy_id, addr, irq_str, phydev_name(phydev),
- act ? " active" : "");
+ phy_attached_info(phydev);
found = 1;
}
if (!found && !mdio_node) {
- netdev_warn(ndev, "No PHY found\n");
+ dev_warn(dev, "No PHY found\n");
mdiobus_unregister(new_bus);
mdiobus_free(new_bus);
return -ENODEV;
brcmf_feat_firmware_capabilities(ifp);
memset(&gscan_cfg, 0, sizeof(gscan_cfg));
- brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN, "pfn_gscan_cfg",
- &gscan_cfg, sizeof(gscan_cfg));
+ if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID)
+ brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
+ "pfn_gscan_cfg",
+ &gscan_cfg, sizeof(gscan_cfg));
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
if (drvr->bus_if->wowl_supported)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
.fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_next_step = IWL9260B_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
.fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_next_step = IWL9260B_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
const struct iwl_cfg iwl9270_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9270",
.fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_next_step = IWL9260B_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
const struct iwl_cfg iwl9460_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9000_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
const struct iwl_cfg iwl9560_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9560",
- .fw_name_pre = IWL9000_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
* @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger
* command size (command version 4) that supports toggling ACK TX
* power reduction.
+ * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
+ IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
- * @fw_name_pre_next_step: same as @fw_name_pre, only for next step
+ * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps
* (if supported)
- * @fw_name_pre_rf_next_step: same as @fw_name_pre_next_step, only for rf next
- * step. Supported only in integrated solutions.
+ * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf
+ * next step. Supported only in integrated solutions.
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @max_inst_size: The maximal length of the fw inst section
/* params specific to an individual device within a device family */
const char *name;
const char *fw_name_pre;
- const char *fw_name_pre_next_step;
+ const char *fw_name_pre_b_or_c_step;
const char *fw_name_pre_rf_next_step;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
const char *fw_pre_name;
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
- CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
- fw_pre_name = cfg->fw_name_pre_next_step;
+ (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP ||
+ CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP))
+ fw_pre_name = cfg->fw_name_pre_b_or_c_step;
else if (drv->trans->cfg->integrated &&
CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
cfg->fw_name_pre_rf_next_step)
int num_of_ch, __le32 *channels, u16 fw_mcc)
{
int ch_idx;
- u16 ch_flags, prev_ch_flags = 0;
+ u16 ch_flags;
+ u32 reg_rule_flags, prev_reg_rule_flags = 0;
const u8 *nvm_chan = cfg->ext_nvm ?
iwl_ext_nvm_channels : iwl_nvm_channels;
struct ieee80211_regdomain *regd;
continue;
}
+ reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
+ ch_flags, cfg);
+
/* we can't continue the same rule */
- if (ch_idx == 0 || prev_ch_flags != ch_flags ||
+ if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
center_freq - prev_center_freq > 20) {
valid_rules++;
new_rule = true;
rule->power_rule.max_eirp =
DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
- rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
- ch_flags, cfg);
+ rule->flags = reg_rule_flags;
/* rely on auto-calculation to merge BW of contiguous chans */
rule->flags |= NL80211_RRF_AUTO_BW;
rule->freq_range.max_bandwidth_khz = 0;
- prev_ch_flags = ch_flags;
prev_center_freq = center_freq;
+ prev_reg_rule_flags = reg_rule_flags;
IWL_DEBUG_DEV(dev, IWL_DL_LAR,
- "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
+ "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x) reg_flags 0x%x: %s\n",
center_freq,
band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
CHECK_AND_PRINT_I(VALID),
CHECK_AND_PRINT_I(160MHZ),
CHECK_AND_PRINT_I(INDOOR_ONLY),
CHECK_AND_PRINT_I(GO_CONCURRENT),
- ch_flags,
+ ch_flags, reg_rule_flags,
((ch_flags & NVM_CHANNEL_ACTIVE) &&
!(ch_flags & NVM_CHANNEL_RADAR))
- ? "" : "not ");
+ ? "Ad-Hoc" : "");
}
regd->n_reg_rules = valid_rules;
entry = &wifi_pkg->package.elements[idx++];
if ((entry->type != ACPI_TYPE_INTEGER) ||
- (entry->integer.value > U8_MAX))
- return -EINVAL;
+ (entry->integer.value > U8_MAX)) {
+ ret = -EINVAL;
+ goto out_free;
+ }
mvm->geo_profiles[i].values[j] = entry->integer.value;
}
spin_lock_bh(&mvm_sta->lock);
for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
tid_data = &mvm_sta->tid_data[i];
- while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
+
+ while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ /*
+ * The first deferred frame should've stopped the MAC
+ * queues, so we should never get a second deferred
+ * frame for the RA/TID.
+ */
+ iwl_mvm_start_mac_queues(mvm, info->hw_queue);
ieee80211_free_txskb(mvm->hw, skb);
+ }
}
spin_unlock_bh(&mvm_sta->lock);
}
* first index into rate scale table.
*/
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+ rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
info->status.ampdu_len,
info->status.ampdu_ack_len,
reduced_txp);
if (info->status.ampdu_ack_len == 0)
info->status.ampdu_len = 1;
- rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+ rs_collect_tlc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
info->status.ampdu_len,
info->status.ampdu_ack_len);
continue;
rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
- lq_rate.index, 1,
+ tx_resp_rate.index, 1,
i < retries ? 0 : legacy_success,
reduced_txp);
rs_collect_tlc_data(mvm, lq_sta, tmp_tbl,
- lq_rate.index, 1,
+ tx_resp_rate.index, 1,
i < retries ? 0 : legacy_success);
}
baid_data = rcu_dereference(mvm->baid_map[baid]);
if (!baid_data) {
- WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
- "Received baid %d, but no data exists for this BAID\n",
- baid);
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+ baid, reorder);
return false;
}
data = rcu_dereference(mvm->baid_map[baid]);
if (!data) {
- WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+ baid, reorder_data);
goto out;
}
.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
.add_modify = update ? 1 : 0,
.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
- STA_FLG_MIMO_EN_MSK),
+ STA_FLG_MIMO_EN_MSK |
+ STA_FLG_RTS_MIMO_PROT),
.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
};
int ret;
goto unlock;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
- ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
- sta->addr, ba_data->tid);
+ ieee80211_rx_ba_timer_expired(mvm_sta->vif,
+ sta->addr, ba_data->tid);
unlock:
rcu_read_unlock();
}
else
udp_hdr(skb)->check = 0;
- /* mac header len should include IV, size is in words */
- if (info->control.hw_key)
+ /*
+ * mac header len should include IV, size is in words unless
+ * the IV is added by the firmware like in WEP.
+ * In new Tx API, the IV is always added by the firmware.
+ */
+ if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
+ info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
mh_len += info->control.hw_key->iv_len;
mh_len /= 2;
offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
struct iwl_mvm_tid_data *tid_data;
struct iwl_mvm_sta *mvmsta;
+ ba_info.flags = IEEE80211_TX_STAT_AMPDU;
+
if (iwl_mvm_has_new_tx_api(mvm)) {
struct iwl_mvm_compressed_ba_notif *ba_res =
(void *)pkt->data;
/* 9000 Series */
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
int i;
for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
- if (opt_tokens[i].token & ~allowed_opts) {
+ if ((opt_tokens[i].token & opts->mask) &&
+ (opt_tokens[i].token & ~allowed_opts)) {
pr_warn("invalid parameter '%s'\n",
opt_tokens[i].pattern);
}
return;
}
+ nvmeq->cqe_seen = 1;
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
nvme_end_request(req, cqe->status, cqe->result);
}
consumed++;
}
- if (consumed) {
+ if (consumed)
nvme_ring_cq_doorbell(nvmeq);
- nvmeq->cqe_seen = 1;
- }
}
static irqreturn_t nvme_irq(int irq, void *data)
copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
- memset(id->mn, ' ', sizeof(id->mn));
- strncpy((char *)id->mn, "Linux", sizeof(id->mn));
-
- memset(id->fr, ' ', sizeof(id->fr));
- strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
-
id->rab = 6;
/*
static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
- static struct nvmet_fc_ls_iod *iod;
+ struct nvmet_fc_ls_iod *iod;
unsigned long flags;
spin_lock_irqsave(&tgtport->lock, flags);
static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
- static struct nvmet_fc_fcp_iod *fod;
+ struct nvmet_fc_fcp_iod *fod;
lockdep_assert_held(&queue->qlock);
{
struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
struct nvmet_fc_fcp_iod *fod = queue->fod;
- struct nvmet_fc_defer_fcp_req *deferfcp;
+ struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
unsigned long flags;
int i, writedataactive;
bool disconnect;
}
/* Cleanup defer'ed IOs in queue */
- list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) {
+ list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
+ req_list) {
list_del(&deferfcp->req_list);
kfree(deferfcp);
}
bool coherent;
unsigned long offset;
const struct iommu_ops *iommu;
+ u64 mask;
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
- dev->coherent_dma_mask = min(dev->coherent_dma_mask,
- DMA_BIT_MASK(ilog2(dma_addr + size)));
- *dev->dma_mask = min((*dev->dma_mask),
- DMA_BIT_MASK(ilog2(dma_addr + size)));
+ mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
+ dev->coherent_dma_mask &= mask;
+ *dev->dma_mask &= mask;
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
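A quick worked example of the new mask calculation, using only the expressions shown above: for dma_addr = 0 and size = 0xC0000000, the old code computed DMA_BIT_MASK(ilog2(0xC0000000)) = DMA_BIT_MASK(31) = 0x7FFFFFFF, which cannot address the last byte of the range at 0xBFFFFFFF. The new code computes DMA_BIT_MASK(ilog2(0xBFFFFFFF) + 1) = DMA_BIT_MASK(32) = 0xFFFFFFFF, which covers the whole range without widening the mask beyond what the range actually requires.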
dino_dev->hba.dev = dev;
dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
- dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */
+ dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
spin_lock_init(&dino_dev->dinosaur_pen);
dino_dev->hba.iommu = ccio_get_iommu(dev);
bridge = pci_upstream_bridge(bridge);
}
- if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
- return NULL;
+ if (highest_pcie_bridge &&
+ pci_pcie_type(highest_pcie_bridge) == PCI_EXP_TYPE_ROOT_PORT)
+ return highest_pcie_bridge;
- return highest_pcie_bridge;
+ return NULL;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);
PCI_EXP_DEVCTL_EXT_TAG);
}
+/**
+ * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
+ * @dev: PCI device to query
+ *
+ * Returns true if the device has the Relaxed Ordering attribute enabled.
+ */
+bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
+{
+ u16 v;
+
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
+
+ return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
+}
+EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
+
+static void pci_configure_relaxed_ordering(struct pci_dev *dev)
+{
+ struct pci_dev *root;
+
+	/* PCI_EXP_DEVCTL_RELAX_EN is RsvdP in VFs */
+ if (dev->is_virtfn)
+ return;
+
+ if (!pcie_relaxed_ordering_enabled(dev))
+ return;
+
+ /*
+ * For now, we only deal with Relaxed Ordering issues with Root
+ * Ports. Peer-to-Peer DMA is another can of worms.
+ */
+ root = pci_find_pcie_root_port(dev);
+ if (!root)
+ return;
+
+ if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
+ pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_RELAX_EN);
+ dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n");
+ }
+}
+
static void pci_configure_device(struct pci_dev *dev)
{
struct hotplug_params hpp;
pci_configure_mps(dev);
pci_configure_extended_tags(dev);
+ pci_configure_relaxed_ordering(dev);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
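A minimal sketch of how a driver might consume the new pcie_relaxed_ordering_enabled() helper, mirroring the cxgb4 change earlier in this series; struct example_adapter and its flag are illustrative and not part of any existing driver:

#include <linux/pci.h>

struct example_adapter {
	bool use_relaxed_ordering;	/* hypothetical per-device flag */
};

/* Only tag DMA writes with the Relaxed Ordering attribute if the device's
 * DEVCTL enable bit survived pci_configure_relaxed_ordering() above. */
static void example_check_relaxed_ordering(struct pci_dev *pdev,
					   struct example_adapter *adap)
{
	adap->use_relaxed_ordering = pcie_relaxed_ordering_enabled(pdev);
}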
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
quirk_tw686x_class);
+/*
+ * Some devices have problems with Transaction Layer Packets with the Relaxed
+ * Ordering Attribute set. Such devices should mark themselves and other
+ * Device Drivers should check before sending TLPs with RO set.
+ */
+static void quirk_relaxedordering_disable(struct pci_dev *dev)
+{
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
+ dev_info(&dev->dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
+}
+
+/*
+ * The Root Complex in Intel Xeon processors based on the Broadwell/Haswell
+ * microarchitecture has a Flow Control Credit issue which can cause performance
+ * problems with Upstream Transaction Layer Packets with Relaxed Ordering set.
+ */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+
+/*
+ * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex
+ * where Upstream Transaction Layer Packets with the Relaxed Ordering
+ * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering
+ * set. This is a violation of the PCIe 3.0 Transaction Ordering Rules
+ * outlined in Section 2.4.1 (PCI Express(r) Base Specification Revision 3.0
+ * November 10, 2010). As a result, on this platform we can't use Relaxed
+ * Ordering for Upstream TLPs.
+ */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+
/*
* Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same
* values for the Attribute as were supplied in the header of the
return;
}
+ if (ioa_cfg->scsi_unblock) {
+ ioa_cfg->scsi_unblock = 0;
+ ioa_cfg->scsi_blocked = 0;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->scsi_blocked)
+ scsi_block_requests(ioa_cfg->host);
+ }
+
if (!ioa_cfg->scan_enabled) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return;
ENTER;
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
ipr_trace;
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
+ ioa_cfg->scsi_unblock = 1;
+ schedule_work(&ioa_cfg->work_q);
}
ioa_cfg->in_reset_reload = 0;
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
- spin_unlock(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock(ioa_cfg->host->host_lock);
-
- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
- scsi_block_requests(ioa_cfg->host);
-
+ ioa_cfg->scsi_unblock = 1;
schedule_work(&ioa_cfg->work_q);
LEAVE;
return IPR_RC_JOB_RETURN;
spin_unlock(&ioa_cfg->hrrq[i]._lock);
}
wmb();
- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ ioa_cfg->scsi_unblock = 0;
+ ioa_cfg->scsi_blocked = 1;
scsi_block_requests(ioa_cfg->host);
+ }
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioa_cfg->reset_cmd = ipr_cmd;
wake_up_all(&ioa_cfg->reset_wait_q);
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
+ ioa_cfg->scsi_unblock = 1;
+ schedule_work(&ioa_cfg->work_q);
}
return;
} else {
u8 cfg_locked:1;
u8 clear_isr:1;
u8 probe_done:1;
+ u8 scsi_unblock:1;
+ u8 scsi_blocked:1;
u8 revid;
for (i = 0; i < vha->hw->max_req_queues; i++) {
struct req_que *req = vha->hw->req_q_map[i];
- if (!test_bit(i, vha->hw->req_qid_map))
- continue;
-
if (req || !buf) {
length = req ?
req->length : REQUEST_ENTRY_CNT_24XX;
for (i = 0; i < vha->hw->max_rsp_queues; i++) {
struct rsp_que *rsp = vha->hw->rsp_q_map[i];
- if (!test_bit(i, vha->hw->rsp_qid_map))
- continue;
-
if (rsp || !buf) {
length = rsp ?
rsp->length : RESPONSE_ENTRY_CNT_MQ;
for (i = 0; i < vha->hw->max_req_queues; i++) {
struct req_que *req = vha->hw->req_q_map[i];
- if (!test_bit(i, vha->hw->req_qid_map))
- continue;
-
if (req || !buf) {
qla27xx_insert16(i, buf, len);
qla27xx_insert16(1, buf, len);
for (i = 0; i < vha->hw->max_rsp_queues; i++) {
struct rsp_que *rsp = vha->hw->rsp_q_map[i];
- if (!test_bit(i, vha->hw->rsp_qid_map))
- continue;
-
if (rsp || !buf) {
qla27xx_insert16(i, buf, len);
qla27xx_insert16(1, buf, len);
ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
NULL, SES_TIMEOUT, SES_RETRIES, NULL);
- if (unlikely(!ret))
+ if (unlikely(ret))
return ret;
recv_page_code = ((unsigned char *)buf)[0];
kref_init(&tpnt->kref);
tpnt->disk = disk;
disk->private_data = &tpnt->driver;
- disk->queue = SDp->request_queue;
/* SCSI tape doesn't register this gendisk via add_disk(). Manually
* take queue reference that release_disk() expects. */
- if (!blk_get_queue(disk->queue))
+ if (!blk_get_queue(SDp->request_queue))
goto out_put_disk;
+ disk->queue = SDp->request_queue;
tpnt->driver = &st_template;
tpnt->device = SDp;
domain->dev = &pdev->dev;
- ret = pm_genpd_init(&domain->genpd, NULL, true);
- if (ret) {
- dev_err(domain->dev, "Failed to init power domain\n");
- return ret;
- }
-
domain->regulator = devm_regulator_get_optional(domain->dev, "power");
if (IS_ERR(domain->regulator)) {
if (PTR_ERR(domain->regulator) != -ENODEV) {
- dev_err(domain->dev, "Failed to get domain's regulator\n");
+ if (PTR_ERR(domain->regulator) != -EPROBE_DEFER)
+ dev_err(domain->dev, "Failed to get domain's regulator\n");
return PTR_ERR(domain->regulator);
}
} else {
domain->voltage, domain->voltage);
}
+ ret = pm_genpd_init(&domain->genpd, NULL, true);
+ if (ret) {
+ dev_err(domain->dev, "Failed to init power domain\n");
+ return ret;
+ }
+
ret = of_genpd_add_provider_simple(domain->dev->of_node,
&domain->genpd);
if (ret) {
ti_sci_pd->dev = dev;
+ ti_sci_pd->pd.name = "ti_sci_pd";
+
ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev;
ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev;
struct tty_struct *tty;
struct path *pts_path;
struct dentry *dentry;
+ struct vfsmount *mnt;
int retval;
int index;
if (retval)
return retval;
- fsi = devpts_acquire(filp);
+ fsi = devpts_acquire(filp, &mnt);
if (IS_ERR(fsi)) {
retval = PTR_ERR(fsi);
goto out_free_file;
pts_path = kmalloc(sizeof(struct path), GFP_KERNEL);
if (!pts_path)
goto err_release;
- pts_path->mnt = filp->f_path.mnt;
+ pts_path->mnt = mnt;
pts_path->dentry = dentry;
path_get(pts_path);
tty->link->driver_data = pts_path;
path_put(pts_path);
kfree(pts_path);
err_release:
+ mntput(mnt);
tty_unlock(tty);
// This will also put-ref the fsi
tty_release(inode, filp);
devpts_kill_index(fsi, index);
out_put_fsi:
devpts_release(fsi);
+ mntput(mnt);
out_free_file:
tty_free_file(filp);
return retval;
unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
- return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
- ((bfn1 == bfn2) || ((bfn1+1) == bfn2));
+ return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
#else
/*
* XXX: Add support for merging bio_vec when using different page
{
unsigned long random_variable = 0;
- if ((current->flags & PF_RANDOMIZE) &&
- !(current->personality & ADDR_NO_RANDOMIZE)) {
+ if (current->flags & PF_RANDOMIZE) {
random_variable = get_random_long();
random_variable &= STACK_RND_MASK;
random_variable <<= PAGE_SHIFT;
return sb->s_fs_info;
}
-struct pts_fs_info *devpts_acquire(struct file *filp)
+struct pts_fs_info *devpts_acquire(struct file *filp, struct vfsmount **ptsmnt)
{
struct pts_fs_info *result;
struct path path;
path = filp->f_path;
path_get(&path);
+ *ptsmnt = NULL;
/* Has the devpts filesystem already been found? */
sb = path.mnt->mnt_sb;
* pty code needs to hold extra references in case of last /dev/tty close
*/
atomic_inc(&sb->s_active);
+ *ptsmnt = mntget(path.mnt);
result = DEVPTS_SB(sb);
out:
unsigned long bytes; /* Bytes to write to page */
offset = (pos & (PAGE_SIZE - 1));
- bytes = min_t(unsigned long, PAGE_SIZE - offset, length);
+ bytes = min_t(loff_t, PAGE_SIZE - offset, length);
rpage = __iomap_read_page(inode, pos);
if (IS_ERR(rpage))
unsigned offset, bytes;
offset = pos & (PAGE_SIZE - 1); /* Within page */
- bytes = min_t(unsigned, PAGE_SIZE - offset, count);
+ bytes = min_t(loff_t, PAGE_SIZE - offset, count);
if (IS_DAX(inode))
status = iomap_dax_zero(pos, offset, bytes, iomap);
WARN_ON_ONCE(1);
dquot->dq_dqb.dqb_rsvspace = 0;
}
+ if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
+ dquot->dq_dqb.dqb_bsoftlimit)
+ dquot->dq_dqb.dqb_btime = (time64_t) 0;
+ clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}
static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
dquot->dq_dqb.dqb_curspace -= number;
else
dquot->dq_dqb.dqb_curspace = 0;
- if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
+ if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
+ dquot->dq_dqb.dqb_bsoftlimit)
dquot->dq_dqb.dqb_btime = (time64_t) 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}
static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
+ qsize_t tspace;
+
+ tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;
+
if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
- dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
+ tspace <= dquot->dq_dqb.dqb_bsoftlimit)
return QUOTA_NL_NOWARN;
- if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
+ if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
return QUOTA_NL_BSOFTBELOW;
- if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
- dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
+ if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
+ tspace - space < dquot->dq_dqb.dqb_bhardlimit)
return QUOTA_NL_BHARDBELOW;
return QUOTA_NL_NOWARN;
}
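To see why the reserved space has to be folded in, take bsoftlimit = 1000, dqb_curspace = 900, dqb_rsvspace = 200 and space (the amount being freed) = 150. The old check looked only at dqb_curspace (900 <= 1000) and returned QUOTA_NL_NOWARN even though the user currently holds 1100 units. With the fix, tspace = 1100 exceeds the soft limit while tspace - space = 950 drops below it, so info_bdq_free() correctly reports QUOTA_NL_BSOFTBELOW.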
if (check_blim) {
if (!dm->dqb_bsoftlimit ||
- dm->dqb_curspace < dm->dqb_bsoftlimit) {
+ dm->dqb_curspace + dm->dqb_rsvspace < dm->dqb_bsoftlimit) {
dm->dqb_btime = 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
} else if (!(di->d_fieldmask & QC_SPC_TIMER))
/* free inodes to the left? */
if (useleft && trec.ir_freecount) {
- rec = trec;
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
cur = tcur;
pag->pagl_leftrec = trec.ir_startino;
pag->pagl_rightrec = rec.ir_startino;
pag->pagl_pagino = pagino;
+ rec = trec;
goto alloc_inode;
}
return 0;
}
+ /*
+ * During the second phase of log recovery, we need iget and
+ * iput to behave like they do for an active filesystem.
+ * xfs_fs_drop_inode needs to be able to prevent the deletion
+ * of inodes before we're done replaying log items on those
+ * inodes. Turn it off immediately after recovery finishes
+ * so that we don't leak the quota inodes if subsequent mount
+ * activities fail.
+ */
+ mp->m_super->s_flags |= MS_ACTIVE;
error = xlog_recover_finish(mp->m_log);
if (!error)
xfs_log_work_queue(mp);
+ mp->m_super->s_flags &= ~MS_ACTIVE;
return error;
}
}
}
- /*
- * During the second phase of log recovery, we need iget and
- * iput to behave like they do for an active filesystem.
- * xfs_fs_drop_inode needs to be able to prevent the deletion
- * of inodes before we're done replaying log items on those
- * inodes.
- */
- mp->m_super->s_flags |= MS_ACTIVE;
-
/*
* Finish recovering the file system. This part needed to be delayed
* until after the root and real-time bitmap inodes were consistently
out_quota:
xfs_qm_unmount_quotas(mp);
out_rtunmount:
- mp->m_super->s_flags &= ~MS_ACTIVE;
xfs_rtunmount_inodes(mp);
out_rele_rip:
IRELE(rip);
cancel_delayed_work_sync(&mp->m_reclaim_work);
xfs_reclaim_inodes(mp, SYNC_WAIT);
+ /* Clean out dquots that might be in memory after quotacheck. */
+ xfs_qm_unmount(mp);
out_log_dealloc:
mp->m_flags |= XFS_MOUNT_UNMOUNTING;
xfs_log_mount_cancel(mp);
struct pts_fs_info;
-struct pts_fs_info *devpts_acquire(struct file *);
+struct pts_fs_info *devpts_acquire(struct file *, struct vfsmount **ptsmnt);
void devpts_release(struct pts_fs_info *);
int devpts_new_index(struct pts_fs_info *);
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
+void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align);
-phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
-phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
phys_addr_t *out_end);
+void __memblock_free_early(phys_addr_t base, phys_addr_t size);
+void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+
/**
* for_each_mem_range - iterate through memblock areas from type_a and not
* included in type_b. Or just type_a if type_b is NULL.
extern int do_swap_account;
#endif
-void lock_page_memcg(struct page *page);
+struct mem_cgroup *lock_page_memcg(struct page *page);
+void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
{
}
-static inline void lock_page_memcg(struct page *page)
+static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+{
+ return NULL;
+}
+
+static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}
/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
* in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
- * Eventually all flags will be in sk->sk_wq_flags.
+ * Eventually all flags will be in sk->sk_wq->flags.
*/
#define SOCKWQ_ASYNC_NOSPACE 0
#define SOCKWQ_ASYNC_WAITDATA 1
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
+
+#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
+ defined(CONFIG_HARDLOCKUP_DETECTOR)
+void watchdog_update_hrtimer_threshold(u64 period);
+#else
+static inline void watchdog_update_hrtimer_threshold(u64 period) { }
+#endif
+
extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int ,
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
+#include <linux/sched/coredump.h> /* MMF_* */
+#include <linux/mm.h> /* VM_FAULT* */
struct zonelist;
struct notifier_block;
return tsk->signal->oom_mm;
}
+/*
+ * Checks whether a page fault on the given mm is still reliable.
+ * This is no longer true if the oom reaper started to reap the
+ * address space which is reflected by MMF_UNSTABLE flag set in
+ * the mm. At that moment any !shared mapping would lose the content
+ * and could cause a memory corruption (zero pages instead of the
+ * original content).
+ *
+ * User should call this before establishing a page table entry for
+ * a !shared mapping and under the proper page table lock.
+ *
+ * Return 0 when the PF is safe, VM_FAULT_SIGBUS otherwise.
+ */
+static inline int check_stable_address_space(struct mm_struct *mm)
+{
+ if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
+ return VM_FAULT_SIGBUS;
+ return 0;
+}
+
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
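A minimal sketch of the calling pattern the comment above asks for; the function name and fault details are illustrative, and the real callers are the anonymous/huge-page fault paths changed further down:

#include <linux/mm.h>
#include <linux/oom.h>

/* Illustrative only: install a PTE for a private anonymous mapping.
 * The check runs with the page table lock (vmf->ptl) already held. */
static int example_install_anon_pte(struct vm_fault *vmf, pte_t entry)
{
	int ret;

	ret = check_stable_address_space(vmf->vma->vm_mm);
	if (ret)
		return ret;	/* VM_FAULT_SIGBUS: the oom reaper got here first */

	set_pte_at(vmf->vma->vm_mm, vmf->address, vmf->pte, entry);
	return 0;
}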
* the direct_complete optimization.
*/
PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
+ /* Don't use Relaxed Ordering for TLPs directed at this device */
+ PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 12),
};
enum pci_irq_reroute_variant {
void pci_pme_wakeup_bus(struct pci_bus *bus);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
+bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
/* PCI Virtual Channel */
int pci_save_vc_state(struct pci_dev *dev);
* Notification that the event was mapped or unmapped. Called
* in the context of the mapping task.
*/
- void (*event_mapped) (struct perf_event *event); /*optional*/
- void (*event_unmapped) (struct perf_event *event); /*optional*/
+ void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
+ void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
/*
* Flags for ->add()/->del()/ ->start()/->stop(). There are
__ret; \
})
+#define __wait_event_killable_timeout(wq_head, condition, timeout) \
+ ___wait_event(wq_head, ___wait_cond_timeout(condition), \
+ TASK_KILLABLE, 0, timeout, \
+ __ret = schedule_timeout(__ret))
+
+/**
+ * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
+ * @wq_head: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a kill signal is received.
+ * The @condition is checked each time the waitqueue @wq_head is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
+ * interrupted by a kill signal.
+ *
+ * Only kill signals interrupt this process.
+ */
+#define wait_event_killable_timeout(wq_head, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ might_sleep(); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_killable_timeout(wq_head, \
+ condition, timeout); \
+ __ret; \
+})
+
#define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
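A small illustrative caller showing how the three outcomes of wait_event_killable_timeout() are typically distinguished; the wait queue and condition are made up, and the kmod change further down follows the same pattern:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_ready;

static int example_wait_for_ready(void)
{
	long ret;

	/* Sleep killably for at most 5 seconds waiting for example_ready. */
	ret = wait_event_killable_timeout(example_wq, example_ready, 5 * HZ);
	if (!ret)
		return -ETIME;		/* timed out, condition still false */
	if (ret == -ERESTARTSYS)
		return ret;		/* a fatal signal interrupted the wait */
	return 0;			/* condition became true within the timeout */
}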
in6_dev_finish_destroy(idev);
}
+static inline void in6_dev_put_clear(struct inet6_dev **pidev)
+{
+ struct inet6_dev *idev = *pidev;
+
+ if (idev) {
+ in6_dev_put(idev);
+ *pidev = NULL;
+ }
+}
+
static inline void __in6_dev_put(struct inet6_dev *idev)
{
refcount_dec(&idev->refcnt);
BOND_MODE(bond) == BOND_MODE_ALB;
}
+static inline bool bond_needs_speed_duplex(const struct bonding *bond)
+{
+ return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
+}
+
static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
{
return (BOND_MODE(bond) == BOND_MODE_TLB) &&
#include <linux/sched/signal.h>
#include <net/ip.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
-
-struct napi_struct;
-extern unsigned int sysctl_net_busy_read __read_mostly;
-extern unsigned int sysctl_net_busy_poll __read_mostly;
-
/* 0 - Reserved to indicate value not set
* 1..NR_CPUS - Reserved for sender_cpu
* NR_CPUS+1..~0 - Region available for NAPI IDs
*/
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
+#ifdef CONFIG_NET_RX_BUSY_POLL
+
+struct napi_struct;
+extern unsigned int sysctl_net_busy_read __read_mostly;
+extern unsigned int sysctl_net_busy_poll __read_mostly;
+
static inline bool net_busy_loop_on(void)
{
return sysctl_net_busy_poll;
ieee80211_manage_rx_ba_offl(vif, addr, tid + IEEE80211_NUM_TIDS);
}
+/**
+ * ieee80211_rx_ba_timer_expired - stop a Rx BA session due to timeout
+ *
+ * Some device drivers do not offload AddBa/DelBa negotiation, but handle rx
+ * buffer reordering internally, and therefore also handle the session timer.
+ *
+ * Trigger the timeout flow, which sends a DelBa.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @addr: station mac address
+ * @tid: the rx tid
+ */
+void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif,
+ const u8 *addr, unsigned int tid);
+
/* Rate control API */
/**
static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
struct iov_iter *to)
{
- int n, copy = len - off;
+ int n;
- n = copy_to_iter(skb->data + off, copy, to);
- if (n == copy)
+ n = copy_to_iter(skb->data + off, len, to);
+ if (n == len)
return 0;
+ iov_iter_revert(to, n);
return -EFAULT;
}
/* fsnotify events we care about. */
#define AUDIT_FS_WATCH (FS_MOVE | FS_CREATE | FS_DELETE | FS_DELETE_SELF |\
- FS_MOVE_SELF | FS_EVENT_ON_CHILD)
+ FS_MOVE_SELF | FS_EVENT_ON_CHILD | FS_UNMOUNT)
static void audit_free_parent(struct audit_parent *parent)
{
list_del(&krule->rlist);
if (list_empty(&watch->rules)) {
+ /*
+ * audit_remove_watch() drops our reference to 'parent' which
+ * can get freed. Grab our own reference to be safe.
+ */
+ audit_get_parent(parent);
audit_remove_watch(watch);
-
- if (list_empty(&parent->watches)) {
- audit_get_parent(parent);
+ if (list_empty(&parent->watches))
fsnotify_destroy_mark(&parent->mark, audit_watch_group);
- audit_put_parent(parent);
- }
+ audit_put_parent(parent);
}
}
return can_add_hw;
}
+/*
+ * Complement to update_event_times(). This computes the tstamp_* values to
+ * continue 'enabled' state from @now, and effectively discards the time
+ * between the prior tstamp_stopped and now (as we were in the OFF state, or
+ * just switched (context) time base).
+ *
+ * This further assumes '@event->state == INACTIVE' (we just came from OFF) and
+ * cannot have been scheduled in yet. And going into INACTIVE state means
+ * '@event->tstamp_stopped = @now'.
+ *
+ * Thus given the rules of update_event_times():
+ *
+ * total_time_enabled = tstamp_stopped - tstamp_enabled
+ * total_time_running = tstamp_stopped - tstamp_running
+ *
+ * We can insert 'tstamp_stopped == now' and reverse them to compute new
+ * tstamp_* values.
+ */
+static void __perf_event_enable_time(struct perf_event *event, u64 now)
+{
+ WARN_ON_ONCE(event->state != PERF_EVENT_STATE_INACTIVE);
+
+ event->tstamp_stopped = now;
+ event->tstamp_enabled = now - event->total_time_enabled;
+ event->tstamp_running = now - event->total_time_running;
+}
+
static void add_event_to_ctx(struct perf_event *event,
struct perf_event_context *ctx)
{
list_add_event(event, ctx);
perf_group_attach(event);
- event->tstamp_enabled = tstamp;
- event->tstamp_running = tstamp;
- event->tstamp_stopped = tstamp;
+ /*
+ * We can be called with event->state == STATE_OFF when we create with
+ * .disabled = 1. In that case the IOC_ENABLE will call this function.
+ */
+ if (event->state == PERF_EVENT_STATE_INACTIVE)
+ __perf_event_enable_time(event, tstamp);
}
static void ctx_sched_out(struct perf_event_context *ctx,
u64 tstamp = perf_event_time(event);
event->state = PERF_EVENT_STATE_INACTIVE;
- event->tstamp_enabled = tstamp - event->total_time_enabled;
+ __perf_event_enable_time(event, tstamp);
list_for_each_entry(sub, &event->sibling_list, group_entry) {
+ /* XXX should not be > INACTIVE if event isn't */
if (sub->state >= PERF_EVENT_STATE_INACTIVE)
- sub->tstamp_enabled = tstamp - sub->total_time_enabled;
+ __perf_event_enable_time(sub, tstamp);
}
}
atomic_inc(&event->rb->aux_mmap_count);
if (event->pmu->event_mapped)
- event->pmu->event_mapped(event);
+ event->pmu->event_mapped(event, vma->vm_mm);
}
static void perf_pmu_output_stop(struct perf_event *event);
unsigned long size = perf_data_size(rb);
if (event->pmu->event_unmapped)
- event->pmu->event_unmapped(event);
+ event->pmu->event_unmapped(event, vma->vm_mm);
/*
* rb->aux_mmap_count will always drop before rb->mmap_count and
vma->vm_ops = &perf_mmap_vmops;
if (event->pmu->event_mapped)
- event->pmu->event_mapped(event);
+ event->pmu->event_mapped(event, vma->vm_mm);
return ret;
}
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
- unsigned long flags;
+ unsigned long flags, trigger, tmp;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
irq_settings_clr_and_set(desc, clr, set);
+ trigger = irqd_get_trigger_type(&desc->irq_data);
+
irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
if (irq_settings_has_no_balance_set(desc))
if (irq_settings_is_level(desc))
irqd_set(&desc->irq_data, IRQD_LEVEL);
- irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
+ tmp = irq_settings_get_trigger_mask(desc);
+ if (tmp != IRQ_TYPE_NONE)
+ trigger = tmp;
+
+ irqd_set(&desc->irq_data, trigger);
irq_put_desc_unlock(desc, flags);
}
struct irq_data *data = irq_get_irq_data(irq);
struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
- if (!data || !ipimask || cpu > nr_cpu_ids)
+ if (!data || !ipimask || cpu >= nr_cpu_ids)
return INVALID_HWIRQ;
if (!cpumask_test_cpu(cpu, ipimask))
if (!chip->ipi_send_single && !chip->ipi_send_mask)
return -EINVAL;
- if (cpu > nr_cpu_ids)
+ if (cpu >= nr_cpu_ids)
return -EINVAL;
if (dest) {
static atomic_t kmod_concurrent_max = ATOMIC_INIT(MAX_KMOD_CONCURRENT);
static DECLARE_WAIT_QUEUE_HEAD(kmod_wq);
+/*
+ * This is a restriction on having *all* MAX_KMOD_CONCURRENT threads
+ * running at the same time without returning. When this happens we
+ * believe you've somehow ended up with a recursive module dependency
+ * creating a loop.
+ *
+ * We have no option but to fail.
+ *
+ * Userspace should proactively try to detect and prevent these.
+ */
+#define MAX_KMOD_ALL_BUSY_TIMEOUT 5
+
/*
modprobe_path is set via /proc/sys.
*/
pr_warn_ratelimited("request_module: kmod_concurrent_max (%u) close to 0 (max_modprobes: %u), for module %s, throttling...",
atomic_read(&kmod_concurrent_max),
MAX_KMOD_CONCURRENT, module_name);
- wait_event_interruptible(kmod_wq,
- atomic_dec_if_positive(&kmod_concurrent_max) >= 0);
+ ret = wait_event_killable_timeout(kmod_wq,
+ atomic_dec_if_positive(&kmod_concurrent_max) >= 0,
+ MAX_KMOD_ALL_BUSY_TIMEOUT * HZ);
+ if (!ret) {
+ pr_warn_ratelimited("request_module: modprobe %s cannot be processed, kmod busy with %d threads for more than %d seconds now",
+ module_name, MAX_KMOD_CONCURRENT, MAX_KMOD_ALL_BUSY_TIMEOUT);
+ return -ETIME;
+ } else if (ret == -ERESTARTSYS) {
+ pr_warn_ratelimited("request_module: sigkill sent for modprobe %s, giving up", module_name);
+ return ret;
+ }
}
trace_module_request(module_name, wait, _RET_IP_);
recalc_sigpending_and_wake(t);
}
}
- if (action->sa.sa_handler == SIG_DFL)
+ /*
+ * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
+ * debugging to leave init killable.
+ */
+ if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
t->signal->flags &= ~SIGNAL_UNKILLABLE;
ret = specific_send_sig_info(sig, info, t);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
fmt_cnt++;
}
- return __trace_printk(1/* fake ip will not be printed */, fmt,
- mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
- mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
- mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
+/* Horrid workaround for getting va_list handling working with different
+ * argument type combinations generically for 32 and 64 bit archs.
+ */
+#define __BPF_TP_EMIT() __BPF_ARG3_TP()
+#define __BPF_TP(...) \
+ __trace_printk(1 /* Fake ip will not be printed. */, \
+ fmt, ##__VA_ARGS__)
+
+#define __BPF_ARG1_TP(...) \
+ ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
+ ? __BPF_TP(arg1, ##__VA_ARGS__) \
+ : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
+ ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
+ : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
+
+#define __BPF_ARG2_TP(...) \
+ ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
+ ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
+ : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
+ ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
+ : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
+
+#define __BPF_ARG3_TP(...) \
+ ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
+ ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
+ : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
+ ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
+ : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
+
+ return __BPF_TP_EMIT();
}
static const struct bpf_func_proto bpf_trace_printk_proto = {
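To make the macro chain above concrete: with mod[] = {2, 1, 0} (a u64, a long and a u32 conversion in fmt) on a 64-bit kernel, __BPF_TP_EMIT() passes arg1 and arg2 at full register width and truncates arg3, roughly expanding to:

	__trace_printk(1 /* Fake ip will not be printed. */, fmt,
		       arg1, arg2, (u32)arg3);

On a 32-bit kernel the same mod[] selects arg1, (long)arg2, (long)arg3 instead. The point of the per-width casts is that the old single ternary expression forced all three arms to a common 64-bit type, so on 32-bit architectures narrower arguments were pushed as 64-bit varargs and the printed output came out wrong.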
* hardlockup detector generates a warning
*/
sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
+ watchdog_update_hrtimer_threshold(sample_period);
}
/* Commands for resetting the watchdog */
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
+#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
+static DEFINE_PER_CPU(ktime_t, last_timestamp);
+static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
+static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;
+
+void watchdog_update_hrtimer_threshold(u64 period)
+{
+ /*
+ * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
+ *
+ * So it runs effectively with 2.5 times the rate of the NMI
+ * watchdog. That means the hrtimer should fire 2-3 times before
+ * the NMI watchdog expires. The NMI watchdog on x86 is based on
+ * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
+ * might run way faster than expected and the NMI fires in a
+ * smaller period than the one deduced from the nominal CPU
+ * frequency. Depending on the Turbo-Mode factor this might be fast
+ * enough to get the NMI period smaller than the hrtimer watchdog
+ * period and trigger false positives.
+ *
+ * The sample threshold is used to check in the NMI handler whether
+ * the minimum time between two NMI samples has elapsed. That
+ * prevents false positives.
+ *
+ * Set this to 4/5 of the actual watchdog threshold period so the
+ * hrtimer is guaranteed to fire at least once within the real
+ * watchdog threshold.
+ */
+ watchdog_hrtimer_sample_threshold = period * 2;
+}
+
+static bool watchdog_check_timestamp(void)
+{
+ ktime_t delta, now = ktime_get_mono_fast_ns();
+
+ delta = now - __this_cpu_read(last_timestamp);
+ if (delta < watchdog_hrtimer_sample_threshold) {
+ /*
+ * If ktime is jiffies based, a stalled timer would prevent
+ * jiffies from being incremented and the filter would look
+ * at a stale timestamp and never trigger.
+ */
+ if (__this_cpu_inc_return(nmi_rearmed) < 10)
+ return false;
+ }
+ __this_cpu_write(nmi_rearmed, 0);
+ __this_cpu_write(last_timestamp, now);
+ return true;
+}
+#else
+static inline bool watchdog_check_timestamp(void)
+{
+ return true;
+}
+#endif
+
static struct perf_event_attr wd_hw_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
return;
}
+ if (!watchdog_check_timestamp())
+ return;
+
/* check for a hardlockup
* This is done by making sure our timer interrupt
* is incrementing. The timer interrupt should have
bool
select SOFTLOCKUP_DETECTOR
+#
+# Enables a timestamp-based low-pass filter to compensate for perf-based
+# hard lockup detection, which runs too fast due to turbo modes.
+#
+config HARDLOCKUP_CHECK_TIMESTAMP
+ bool
+
#
# arch/ can define HAVE_HARDLOCKUP_DETECTOR_ARCH to provide their own hard
# lockup detector rather than the perf based detector.
char name[16];
int u32s;
- sprintf(name, "cma-%s", cma->name);
+ scnprintf(name, sizeof(name), "cma-%s", cma->name);
tmp = debugfs_create_dir(name, cma_debugfs_root);
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
+#include <linux/oom.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
struct mem_cgroup *memcg;
pgtable_t pgtable;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+ int ret = 0;
VM_BUG_ON_PAGE(!PageCompound(page), page);
pgtable = pte_alloc_one(vma->vm_mm, haddr);
if (unlikely(!pgtable)) {
- mem_cgroup_cancel_charge(page, memcg, true);
- put_page(page);
- return VM_FAULT_OOM;
+ ret = VM_FAULT_OOM;
+ goto release;
}
clear_huge_page(page, haddr, HPAGE_PMD_NR);
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_none(*vmf->pmd))) {
- spin_unlock(vmf->ptl);
- mem_cgroup_cancel_charge(page, memcg, true);
- put_page(page);
- pte_free(vma->vm_mm, pgtable);
+ goto unlock_release;
} else {
pmd_t entry;
+ ret = check_stable_address_space(vma->vm_mm);
+ if (ret)
+ goto unlock_release;
+
/* Deliver the page fault to userland */
if (userfaultfd_missing(vma)) {
int ret;
}
return 0;
+unlock_release:
+ spin_unlock(vmf->ptl);
+release:
+ if (pgtable)
+ pte_free(vma->vm_mm, pgtable);
+ mem_cgroup_cancel_charge(page, memcg, true);
+ put_page(page);
+ return ret;
}
/*
ret = 0;
set = false;
if (pmd_none(*vmf->pmd)) {
- if (userfaultfd_missing(vma)) {
+ ret = check_stable_address_space(vma->vm_mm);
+ if (ret) {
+ spin_unlock(vmf->ptl);
+ } else if (userfaultfd_missing(vma)) {
spin_unlock(vmf->ptl);
ret = handle_userfault(vmf, VM_UFFD_MISSING);
VM_BUG_ON(ret & VM_FAULT_FALLBACK);
}
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
-
-phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
- phys_addr_t *addr)
-{
- if (memblock.reserved.regions == memblock_reserved_init_regions)
- return 0;
-
- *addr = __pa(memblock.reserved.regions);
-
- return PAGE_ALIGN(sizeof(struct memblock_region) *
- memblock.reserved.max);
-}
-
-phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
- phys_addr_t *addr)
+/**
+ * memblock_discard - discard memory and reserved arrays if they were allocated
+ */
+void __init memblock_discard(void)
{
- if (memblock.memory.regions == memblock_memory_init_regions)
- return 0;
+ phys_addr_t addr, size;
- *addr = __pa(memblock.memory.regions);
+ if (memblock.reserved.regions != memblock_reserved_init_regions) {
+ addr = __pa(memblock.reserved.regions);
+ size = PAGE_ALIGN(sizeof(struct memblock_region) *
+ memblock.reserved.max);
+ __memblock_free_late(addr, size);
+ }
- return PAGE_ALIGN(sizeof(struct memblock_region) *
- memblock.memory.max);
+ if (memblock.memory.regions != memblock_memory_init_regions) {
+ addr = __pa(memblock.memory.regions);
+ size = PAGE_ALIGN(sizeof(struct memblock_region) *
+ memblock.memory.max);
+ __memblock_free_late(addr, size);
+ }
}
-
#endif
/**
* @page: the page
*
* This function protects unlocked LRU pages from being moved to
- * another cgroup and stabilizes their page->mem_cgroup binding.
+ * another cgroup.
+ *
+ * It ensures the lifetime of the returned memcg. The caller is
+ * responsible for the lifetime of the page; __unlock_page_memcg() is
+ * available when @page might get freed inside the locked section.
*/
-void lock_page_memcg(struct page *page)
+struct mem_cgroup *lock_page_memcg(struct page *page)
{
struct mem_cgroup *memcg;
unsigned long flags;
* The RCU lock is held throughout the transaction. The fast
* path can get away without acquiring the memcg->move_lock
* because page moving starts with an RCU grace period.
- */
+ *
+ * The RCU lock also protects the memcg from being freed when
+ * the page state that is going to change is the only thing
+ * preventing the page itself from being freed. E.g. writeback
+ * doesn't hold a page reference and relies on PG_writeback to
+ * keep off truncation, migration and so forth.
+ */
rcu_read_lock();
if (mem_cgroup_disabled())
- return;
+ return NULL;
again:
memcg = page->mem_cgroup;
if (unlikely(!memcg))
- return;
+ return NULL;
if (atomic_read(&memcg->moving_account) <= 0)
- return;
+ return memcg;
spin_lock_irqsave(&memcg->move_lock, flags);
if (memcg != page->mem_cgroup) {
memcg->move_lock_task = current;
memcg->move_lock_flags = flags;
- return;
+ return memcg;
}
EXPORT_SYMBOL(lock_page_memcg);
/**
- * unlock_page_memcg - unlock a page->mem_cgroup binding
- * @page: the page
+ * __unlock_page_memcg - unlock and unpin a memcg
+ * @memcg: the memcg
+ *
+ * Unlock and unpin a memcg returned by lock_page_memcg().
*/
-void unlock_page_memcg(struct page *page)
+void __unlock_page_memcg(struct mem_cgroup *memcg)
{
- struct mem_cgroup *memcg = page->mem_cgroup;
-
if (memcg && memcg->move_lock_task == current) {
unsigned long flags = memcg->move_lock_flags;
rcu_read_unlock();
}
+
+/**
+ * unlock_page_memcg - unlock a page->mem_cgroup binding
+ * @page: the page
+ */
+void unlock_page_memcg(struct page *page)
+{
+ __unlock_page_memcg(page->mem_cgroup);
+}
EXPORT_SYMBOL(unlock_page_memcg);
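A minimal sketch of the calling pattern that the new return value enables,
with a hypothetical caller that may free the page inside the locked section
(the in-tree user is test_clear_page_writeback() further down in this patch):

	struct mem_cgroup *memcg;

	memcg = lock_page_memcg(page);
	/* ... clear page state; the page may be freed from this point on ... */
	__unlock_page_memcg(memcg);	/* only touches the pinned memcg, not @page */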
/*
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
+#include <linux/oom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
struct vm_area_struct *vma = vmf->vma;
struct mem_cgroup *memcg;
struct page *page;
+ int ret = 0;
pte_t entry;
/* File mapping without ->vm_ops ? */
vmf->address, &vmf->ptl);
if (!pte_none(*vmf->pte))
goto unlock;
+ ret = check_stable_address_space(vma->vm_mm);
+ if (ret)
+ goto unlock;
/* Deliver the page fault to userland, check inside PT lock */
if (userfaultfd_missing(vma)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
if (!pte_none(*vmf->pte))
goto release;
+ ret = check_stable_address_space(vma->vm_mm);
+ if (ret)
+ goto release;
+
/* Deliver the page fault to userland, check inside PT lock */
if (userfaultfd_missing(vma)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
update_mmu_cache(vma, vmf->address, vmf->pte);
unlock:
pte_unmap_unlock(vmf->pte, vmf->ptl);
- return 0;
+ return ret;
release:
mem_cgroup_cancel_charge(page, memcg, false);
put_page(page);
int finish_fault(struct vm_fault *vmf)
{
struct page *page;
- int ret;
+ int ret = 0;
/* Did we COW the page? */
if ((vmf->flags & FAULT_FLAG_WRITE) &&
page = vmf->cow_page;
else
page = vmf->page;
- ret = alloc_set_pte(vmf, vmf->memcg, page);
+
+ /*
+ * check even for read faults because we might have lost our CoWed
+ * page
+ */
+ if (!(vmf->vma->vm_flags & VM_SHARED))
+ ret = check_stable_address_space(vmf->vma->vm_mm);
+ if (!ret)
+ ret = alloc_set_pte(vmf, vmf->memcg, page);
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
return ret;
mem_cgroup_oom_synchronize(false);
}
- /*
- * This mm has been already reaped by the oom reaper and so the
- * refault cannot be trusted in general. Anonymous refaults would
- * lose data and give a zero page instead e.g. This is especially
- * problem for use_mm() because regular tasks will just die and
- * the corrupted data will not be visible anywhere while kthread
- * will outlive the oom victim and potentially propagate the date
- * further.
- */
- if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
- && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
- ret = VM_FAULT_SIGBUS;
-
return ret;
}
EXPORT_SYMBOL_GPL(handle_mm_fault);
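For context on the new check_stable_address_space() calls (and the
<linux/oom.h> includes added above), a sketch of what such a helper amounts
to, reconstructed from the comment removed here; the actual definition is
added elsewhere in this series:

	static inline int check_stable_address_space(struct mm_struct *mm)
	{
		/* The mm was reaped by the oom reaper: a refault could only
		 * install a zero page, so fail hard rather than corrupt data.
		 */
		if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
			return VM_FAULT_SIGBUS;
		return 0;
	}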
*policy |= (pol->flags & MPOL_MODE_FLAGS);
}
- if (vma) {
- up_read(¤t->mm->mmap_sem);
- vma = NULL;
- }
-
err = 0;
if (nmask) {
if (mpol_store_user_nodemask(pol)) {
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
+#include <linux/ptrace.h>
#include <asm/tlbflush.h>
const int __user *, nodes,
int __user *, status, int, flags)
{
- const struct cred *cred = current_cred(), *tcred;
struct task_struct *task;
struct mm_struct *mm;
int err;
/*
* Check if this process has the right to modify the specified
- * process. The right exists if the process has administrative
- * capabilities, superuser privileges or the same
- * userid as the target process.
+ * process. Use the regular "ptrace_may_access()" checks.
*/
- tcred = __task_cred(task);
- if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
- !capable(CAP_SYS_NICE)) {
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
rcu_read_unlock();
err = -EPERM;
goto out;
NULL)
count += __free_memory_core(start, end);
-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
- {
- phys_addr_t size;
-
- /* Free memblock.reserved array if it was allocated */
- size = get_allocated_memblock_reserved_regions_info(&start);
- if (size)
- count += __free_memory_core(start, start + size);
-
- /* Free memblock.memory array if it was allocated */
- size = get_allocated_memblock_memory_regions_info(&start);
- if (size)
- count += __free_memory_core(start, start + size);
- }
-#endif
-
return count;
}
int test_clear_page_writeback(struct page *page)
{
struct address_space *mapping = page_mapping(page);
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
int ret;
- lock_page_memcg(page);
+ memcg = lock_page_memcg(page);
+ lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
if (mapping && mapping_use_writeback_tags(mapping)) {
struct inode *inode = mapping->host;
struct backing_dev_info *bdi = inode_to_bdi(inode);
} else {
ret = TestClearPageWriteback(page);
}
+ /*
+ * NOTE: Page might be free now! Writeback doesn't hold a page
+ * reference on its own, it relies on truncation to wait for
+ * the clearing of PG_writeback. The below can only access
+ * page state that is static across allocation cycles.
+ */
if (ret) {
- dec_lruvec_page_state(page, NR_WRITEBACK);
+ dec_lruvec_state(lruvec, NR_WRITEBACK);
dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
inc_node_page_state(page, NR_WRITTEN);
}
- unlock_page_memcg(page);
+ __unlock_page_memcg(memcg);
return ret;
}
/* Reinit limits that are based on free pages after the kernel is up */
files_maxfiles_init();
#endif
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+ /* Discard memblock private memory */
+ memblock_discard();
+#endif
for_each_populated_zone(zone)
set_zone_contiguous(zone);
* A cache is never shut down before deactivation is
* complete, so no need to worry about synchronization.
*/
- return;
+ goto out;
#ifdef CONFIG_MEMCG
kset_unregister(s->memcg_kset);
#endif
kobject_uevent(&s->kobj, KOBJ_REMOVE);
kobject_del(&s->kobj);
+out:
kobject_put(&s->kobj);
}
struct page **pages;
unsigned int nr_pages, array_size, i;
const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
- const gfp_t alloc_mask = gfp_mask | __GFP_HIGHMEM | __GFP_NOWARN;
+ const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
+ const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
+ 0 :
+ __GFP_HIGHMEM;
nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
area->nr_pages = nr_pages;
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
- pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
+ pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
PAGE_KERNEL, node, area->caller);
} else {
pages = kmalloc_node(array_size, nested_gfp, node);
}
if (node == NUMA_NO_NODE)
- page = alloc_page(alloc_mask);
+ page = alloc_page(alloc_mask|highmem_mask);
else
- page = alloc_pages_node(node, alloc_mask, 0);
+ page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vunmap() */
goto fail;
}
area->pages[i] = page;
- if (gfpflags_allow_blocking(gfp_mask))
+ if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
cond_resched();
}
bpf_target_off(struct sk_buff, tc_index, 2,
target_size));
#else
+ *target_size = 2;
if (type == BPF_WRITE)
*insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
else
*insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#else
+ *target_size = 4;
*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
break;
{
struct dccp_sock *dp = dccp_sk(sk);
- /*
- * DCCP doesn't use sk_write_queue, just sk_send_head
- * for retransmissions
- */
+ __skb_queue_purge(&sk->sk_write_queue);
if (sk->sk_send_head != NULL) {
kfree_skb(sk->sk_send_head);
sk->sk_send_head = NULL;
padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len;
if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) {
+ if (skb_put_padto(skb, skb->len + padlen))
+ return NULL;
+
nskb = skb;
} else {
nskb = alloc_skb(NET_IP_ALIGN + skb->len +
skb_set_transport_header(nskb,
skb_transport_header(skb) - skb->head);
skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
+
+ if (skb_put_padto(nskb, nskb->len + padlen)) {
+ kfree_skb(nskb);
+ return NULL;
+ }
+
kfree_skb(skb);
}
- /* skb is freed when it fails */
- if (skb_put_padto(nskb, nskb->len + padlen))
- return NULL;
-
tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
tag[0] = 0;
tag[1] = 1 << p->dp->index; /* destination port */
fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
if (!fi)
goto failure;
- fib_info_cnt++;
if (cfg->fc_mx) {
fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
- if (!fi->fib_metrics)
- goto failure;
+ if (unlikely(!fi->fib_metrics)) {
+ kfree(fi);
+ return ERR_PTR(err);
+ }
atomic_set(&fi->fib_metrics->refcnt, 1);
- } else
+ } else {
fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
-
+ }
+ fib_info_cnt++;
fi->fib_net = net;
fi->fib_protocol = cfg->fc_protocol;
fi->fib_scope = cfg->fc_scope;
err = 0;
if (IS_ERR(rt))
err = PTR_ERR(rt);
+ else
+ skb_dst_set(skb, &rt->dst);
}
if (err)
goto errout_free;
- skb_dst_set(skb, &rt->dst);
if (rtm->rtm_flags & RTM_F_NOTIFY)
rt->rt_flags |= RTCF_NOTIFY;
*/
sock_hold(sk);
refcounted = true;
+ if (tcp_filter(sk, skb))
+ goto discard_and_relse;
nsk = tcp_check_req(sk, skb, req, false);
if (!nsk) {
reqsk_put(req);
}
if (nsk == sk) {
reqsk_put(req);
- } else if (tcp_filter(sk, skb)) {
- goto discard_and_relse;
} else if (tcp_child_process(sk, nsk, skb)) {
tcp_v4_send_reset(nsk, skb);
goto discard_and_relse;
ulp_ops = __tcp_ulp_find_autoload(name);
if (!ulp_ops)
- err = -ENOENT;
- else
- err = ulp_ops->init(sk);
+ return -ENOENT;
- if (err)
- goto out;
+ err = ulp_ops->init(sk);
+ if (err) {
+ module_put(ulp_ops->owner);
+ return err;
+ }
icsk->icsk_ulp_ops = ulp_ops;
- out:
- return err;
+ return 0;
}
struct net_device *loopback_dev =
dev_net(dev)->loopback_dev;
- if (dev != loopback_dev) {
- if (idev && idev->dev == dev) {
- struct inet6_dev *loopback_idev =
- in6_dev_get(loopback_dev);
- if (loopback_idev) {
- rt->rt6i_idev = loopback_idev;
- in6_dev_put(idev);
- }
+ if (idev && idev->dev != loopback_dev) {
+ struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
+ if (loopback_idev) {
+ rt->rt6i_idev = loopback_idev;
+ in6_dev_put(idev);
}
}
}
/* NETDEV_UNREGISTER could be fired for multiple times by
* netdev_wait_allrefs(). Make sure we only call this once.
*/
- in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
+ in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
- in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
+ in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
+ in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
}
}
sock_hold(sk);
refcounted = true;
+ if (tcp_filter(sk, skb))
+ goto discard_and_relse;
nsk = tcp_check_req(sk, skb, req, false);
if (!nsk) {
reqsk_put(req);
if (nsk == sk) {
reqsk_put(req);
tcp_v6_restore_cb(skb);
- } else if (tcp_filter(sk, skb)) {
- goto discard_and_relse;
} else if (tcp_child_process(sk, nsk, skb)) {
tcp_v6_send_reset(nsk, skb);
goto discard_and_relse;
#define BROADCAST_ONE 1
#define BROADCAST_REGISTERED 2
#define BROADCAST_PROMISC_ONLY 4
-static int pfkey_broadcast(struct sk_buff *skb,
+static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
int broadcast_flags, struct sock *one_sk,
struct net *net)
{
rcu_read_unlock();
if (one_sk != NULL)
- err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
+ err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
kfree_skb(skb2);
kfree_skb(skb);
hdr = (struct sadb_msg *) pfk->dump.skb->data;
hdr->sadb_msg_seq = 0;
hdr->sadb_msg_errno = rc;
- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = NULL;
}
hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
sizeof(uint64_t));
- pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
+ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
return 0;
}
xfrm_state_put(x);
- pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
+ pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
return 0;
}
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
- pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
return 0;
}
out_hdr->sadb_msg_reserved = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
return 0;
}
return -ENOBUFS;
}
- pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
-
+ pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
+ sock_net(sk));
return 0;
}
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
- return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
+ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
+ sock_net(sk));
}
static int key_notify_sa_flush(const struct km_event *c)
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
- pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb)
- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;
new_hdr->sadb_msg_errno = 0;
}
- pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
+ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
return 0;
}
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = c->seq;
out_hdr->sadb_msg_pid = c->portid;
- pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
return 0;
}
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
err = 0;
out:
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb)
- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
- pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
+ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
void *ext_hdrs[SADB_EXT_MAX];
int err;
- pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
+ pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
memset(ext_hdrs, 0, sizeof(ext_hdrs));
out_hdr->sadb_msg_seq = 0;
out_hdr->sadb_msg_pid = 0;
- pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+ xs_net(x));
return 0;
}
xfrm_ctx->ctx_len);
}
- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+ xs_net(x));
}
static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
n_port->sadb_x_nat_t_port_port = sport;
n_port->sadb_x_nat_t_port_reserved = 0;
- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+ xs_net(x));
}
#ifdef CONFIG_NET_KEY_MIGRATE
}
/* broadcast migrate message to sockets */
- pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
return 0;
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_manage_rx_ba_offl);
+
+void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif,
+ const u8 *addr, unsigned int tid)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ rcu_read_lock();
+ sta = sta_info_get_bss(sdata, addr);
+ if (!sta)
+ goto unlock;
+
+ set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
+ ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+
+ unlock:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee80211_rx_ba_timer_expired);
void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
- struct Qdisc *root = qdisc_dev(q)->qdisc;
-
- WARN_ON_ONCE(root == &noop_qdisc);
ASSERT_RTNL();
hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
if (invisible)
struct atm_flow_data *flow, *tmp;
pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
- list_for_each_entry(flow, &p->flows, list)
+ list_for_each_entry(flow, &p->flows, list) {
tcf_block_put(flow->block);
+ flow->block = NULL;
+ }
list_for_each_entry_safe(flow, tmp, &p->flows, list) {
if (flow->ref > 1)
* be bound to classes which have been destroyed already. --TGR '04
*/
for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
+ hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
tcf_block_put(cl->block);
+ cl->block = NULL;
+ }
}
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
return err;
q->eligible = RB_ROOT;
+ err = tcf_block_get(&q->root.block, &q->root.filter_list);
+ if (err)
+ goto err_tcf;
+
q->root.cl_common.classid = sch->handle;
q->root.refcnt = 1;
q->root.sched = q;
qdisc_watchdog_init(&q->watchdog, sch);
return 0;
+
+err_tcf:
+ qdisc_class_hash_destroy(&q->clhash);
+ return err;
}
static int
unsigned int i;
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
+ hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) {
tcf_block_put(cl->block);
+ cl->block = NULL;
+ }
}
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
tcf_block_put(q->block);
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
+ hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
tcf_block_put(cl->block);
+ cl->block = NULL;
+ }
}
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
qdisc_drop(head, sch, to_free);
slot_queue_add(slot, skb);
+ qdisc_tree_reduce_backlog(sch, 0, delta);
return NET_XMIT_CN;
}
/* Return Congestion Notification only if we dropped a packet
* from this flow.
*/
- if (qlen != slot->qlen)
+ if (qlen != slot->qlen) {
+ qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
return NET_XMIT_CN;
+ }
/* As we dropped a packet, better let upper stack know this */
qdisc_tree_reduce_backlog(sch, 1, dropped);
rcu_read_lock();
b = rcu_dereference_rtnl(dev->tipc_ptr);
if (likely(b && test_bit(0, &b->up) &&
- (skb->pkt_type <= PACKET_BROADCAST))) {
+ (skb->pkt_type <= PACKET_MULTICAST))) {
skb->next = NULL;
tipc_rcv(dev_net(dev), skb, b);
rcu_read_unlock();
/* Now reverse the concerned fields */
msg_set_errcode(hdr, err);
+ msg_set_non_seq(hdr, 0);
msg_set_origport(hdr, msg_destport(&ohdr));
msg_set_destport(hdr, msg_origport(&ohdr));
msg_set_destnode(hdr, msg_prevnode(&ohdr));
timer.
config SND_SEQ_MIDI_EVENT
- def_tristate SND_RAWMIDI
+ tristate
config SND_SEQ_MIDI
- tristate
+ def_tristate SND_RAWMIDI
select SND_SEQ_MIDI_EVENT
config SND_SEQ_MIDI_EMUL
static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
{
struct snd_seq_queue_info *info = arg;
- int result;
struct snd_seq_queue *q;
- result = snd_seq_queue_alloc(client->number, info->locked, info->flags);
- if (result < 0)
- return result;
-
- q = queueptr(result);
- if (q == NULL)
- return -EINVAL;
+ q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
+ if (IS_ERR(q))
+ return PTR_ERR(q);
info->queue = q->queue;
info->locked = q->locked;
if (!info->name[0])
snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
strlcpy(q->name, info->name, sizeof(q->name));
- queuefree(q);
+ snd_use_lock_free(&q->use_lock);
return 0;
}
static void queue_use(struct snd_seq_queue *queue, int client, int use);
/* allocate a new queue -
- * return queue index value or negative value for error
+ * return pointer to new queue or ERR_PTR(-errno) for error
+ * The new queue's use_lock is set to 1. It is the caller's responsibility to
+ * call snd_use_lock_free(&q->use_lock).
*/
-int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
+struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
{
struct snd_seq_queue *q;
q = queue_new(client, locked);
if (q == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
q->info_flags = info_flags;
queue_use(q, client, 1);
+ snd_use_lock_use(&q->use_lock);
if (queue_list_add(q) < 0) {
+ snd_use_lock_free(&q->use_lock);
queue_delete(q);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
- return q->queue;
+ return q;
}
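A minimal sketch of the ownership rule stated in the comment above, with a
hypothetical caller; the in-tree caller, snd_seq_ioctl_create_queue() earlier
in this patch, follows the same pattern:

	struct snd_seq_queue *q;

	q = snd_seq_queue_alloc(client, locked, flags);
	if (IS_ERR(q))
		return PTR_ERR(q);
	/* ... read q->queue, q->locked, ... */
	snd_use_lock_free(&q->use_lock);	/* drop the use reference taken by alloc */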
/* delete a queue - queue must be owned by the client */
/* create new queue (constructor) */
-int snd_seq_queue_alloc(int client, int locked, unsigned int flags);
+struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags);
/* delete queue (destructor) */
int snd_seq_queue_delete(int client, int queueid);
{
struct snd_emu10k1_fx8010_control_old_gpr __user *octl;
- if (emu->support_tlv)
- return copy_from_user(gctl, &_gctl[idx], sizeof(*gctl));
+ if (emu->support_tlv) {
+ if (in_kernel)
+ memcpy(gctl, (void *)&_gctl[idx], sizeof(*gctl));
+ else if (copy_from_user(gctl, &_gctl[idx], sizeof(*gctl)))
+ return -EFAULT;
+ return 0;
+ }
+
octl = (struct snd_emu10k1_fx8010_control_old_gpr __user *)_gctl;
- if (copy_from_user(gctl, &octl[idx], sizeof(*octl)))
+ if (in_kernel)
+ memcpy(gctl, (void *)&octl[idx], sizeof(*octl));
+ else if (copy_from_user(gctl, &octl[idx], sizeof(*octl)))
return -EFAULT;
gctl->tlv = NULL;
return 0;
SND_HDA_PIN_QUIRK(0x10ec0299, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
ALC225_STANDARD_PINS,
{0x12, 0xb7a60130},
- {0x13, 0xb8a61140},
{0x17, 0x90170110}),
{}
};
if (size < sizeof(scale))
return -ENOMEM;
+ if (cval->min_mute)
+ scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE;
scale[2] = cval->dBmin;
scale[3] = cval->dBmax;
if (copy_to_user(_tlv, scale, sizeof(scale)))
int cached;
int cache_val[MAX_CHANNELS];
u8 initialized;
+ u8 min_mute;
void *private_data;
};
if (unitid == 7 && cval->control == UAC_FU_VOLUME)
snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
break;
+ /* lowest playback value is muted on C-Media devices */
+ case USB_ID(0x0d8c, 0x000c):
+ case USB_ID(0x0d8c, 0x0014):
+ if (strstr(kctl->id.name, "Playback"))
+ cval->min_mute = 1;
+ break;
}
}
case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+ case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
}
}
break;
+ case USB_ID(0x16d0, 0x0a23):
+ if (fp->altsetting == 2)
+ return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ break;
default:
break;
done
override define RUN_TESTS
- @if [ `dirname $(OUTPUT)` = $(PWD) ]; then ./run.sh; fi
+ $(OUTPUT)/run.sh
endef
override define INSTALL_RULE
echo " all Runs all tests (default)"
echo " -t Run test ID the number amount of times is recommended"
echo " -w Watch test ID run until it runs into an error"
- echo " -c Run test ID once"
- echo " -s Run test ID x test-count number of times"
+ echo " -s Run test ID once"
+ echo " -c Run test ID x test-count number of times"
echo " -l List all test ID list"
echo " -h|--help Help"
echo
printf("CLOCK_MONOTONIC_RAW+CLOCK_MONOTONIC precision: %.0f ns\t\t",
1e9 * precision);
- if (precision > MAX_PRECISION) {
- printf("[SKIP]\n");
- ksft_exit_skip();
- }
+ if (precision > MAX_PRECISION)
+ ksft_exit_skip("precision: %.0f ns > MAX_PRECISION: %.0f ns\n",
+ 1e9 * precision, 1e9 * MAX_PRECISION);
printf("[OK]\n");
srand(ts.tv_sec ^ ts.tv_nsec);