+Micrel KSZ9021 Gigabit Ethernet PHY
+
+Some boards require special tuning values, particularly when it comes to
+clock delays. You can specify clock delay values by adding
+micrel-specific properties to an Ethernet OF device node.
+
+All skew control options are specified in picoseconds. The minimum
+value is 0, the maximum value is 3000, and the PHY applies the value
+in 200 ps register steps.
+
+Optional properties:
+ - rxc-skew-ps : Skew control of RXC pad
+ - rxdv-skew-ps : Skew control of RX CTL pad
+ - txc-skew-ps : Skew control of TXC pad
+ - txen-skew-ps : Skew control of TX CTL pad
+ - rxd0-skew-ps : Skew control of RX data 0 pad
+ - rxd1-skew-ps : Skew control of RX data 1 pad
+ - rxd2-skew-ps : Skew control of RX data 2 pad
+ - rxd3-skew-ps : Skew control of RX data 3 pad
+ - txd0-skew-ps : Skew control of TX data 0 pad
+ - txd1-skew-ps : Skew control of TX data 1 pad
+ - txd2-skew-ps : Skew control of TX data 2 pad
+ - txd3-skew-ps : Skew control of TX data 3 pad
+
+Examples:
+
+ /* Attach to an Ethernet device with autodetected PHY */
+ &enet {
+ rxc-skew-ps = <3000>;
+ rxdv-skew-ps = <0>;
+ txc-skew-ps = <3000>;
+ txen-skew-ps = <0>;
+ status = "okay";
+ };
+
+ /* Attach to an explicitly-specified PHY */
+ mdio {
+ phy0: ethernet-phy@0 {
+ rxc-skew-ps = <3000>;
+ rxdv-skew-ps = <0>;
+ txc-skew-ps = <3000>;
+ txen-skew-ps = <0>;
+ reg = <0>;
+ };
+ };
+ ethernet@70000 {
+ status = "okay";
+ phy = <&phy0>;
+ phy-mode = "rgmii-id";
+ };
improve throughput, but will also increase the
amount of memory reserved for use by the client.
- swapaccount[=0|1]
+ swapaccount=[0|1]
 [KNL] Enable accounting of swap in the memory resource
 controller if no parameter or 1 is given; disable it
 if 0 is given (see Documentation/cgroups/memory.txt).
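 Example: booting with "swapaccount=0" turns swap
 accounting off even when the memory resource
 controller itself is enabled.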
=========================
This feature adds Linux 2.2-like transparent proxy support to current kernels.
-To use it, enable NETFILTER_TPROXY, the socket match and the TPROXY target in
-your kernel config. You will need policy routing too, so be sure to enable that
-as well.
+To use it, enable the socket match and the TPROXY target in your kernel config.
+You will need policy routing too, so be sure to enable that as well.
1. Making non-local sockets work
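
A minimal sketch of the usual setup, assuming iptables built with the
socket match and the TPROXY target, plus iproute2: matching packets are
marked and then delivered locally through a dedicated routing table.

    # iptables -t mangle -N DIVERT
    # iptables -t mangle -A PREROUTING -p tcp -m socket -j DIVERT
    # iptables -t mangle -A DIVERT -j MARK --set-mark 1
    # iptables -t mangle -A DIVERT -j ACCEPT
    # ip rule add fwmark 1 lookup 100
    # ip route add local 0.0.0.0/0 dev lo table 100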
F: include/linux/i2c-omap.h
OMAP DEVICE TREE SUPPORT
-M: Benoît Cousson <b-cousson@ti.com>
+M: Benoît Cousson <bcousson@baylibre.com>
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
L: devicetree@vger.kernel.org
F: drivers/char/hw_random/omap-rng.c
OMAP HWMOD SUPPORT
-M: Benoît Cousson <b-cousson@ti.com>
+M: Benoît Cousson <bcousson@baylibre.com>
M: Paul Walmsley <paul@pwsan.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod.*
OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
-M: Benoît Cousson <b-cousson@ti.com>
+M: Benoît Cousson <bcousson@baylibre.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod_44xx_data.c
SGI GRU DRIVER
M: Dimitri Sivanich <sivanich@sgi.com>
-M: Robin Holt <holt@sgi.com>
S: Maintained
F: drivers/misc/sgi-gru/
F: Documentation/sgi-visws.txt
SGI XP/XPC/XPNET DRIVER
-M: Robin Holt <holt@sgi.com>
+M: Cliff Whickman <cpw@sgi.com>
+M: Robin Holt <robinmholt@gmail.com>
S: Maintained
F: drivers/misc/sgi-xp/
F: arch/m68k/include/asm/sun3*
F: drivers/net/ethernet/i825xx/sun3*
+SUNDANCE NETWORK DRIVER
+M: Denis Kirjanov <kda@linux-powerpc.org>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/dlink/sundance.c
+
SUPERH
M: Paul Mundt <lethal@linux-sh.org>
L: linux-sh@vger.kernel.org
VERSION = 3
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
NAME = Linux for Workgroups
# *DOCUMENTATION*
compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
chosen {
- bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
+ bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
};
memory {
- reg = <0x20000000 0x10000000>;
+ reg = <0x20000000 0x8000000>;
};
clocks {
usb0: ohci@00600000 {
status = "okay";
- num-ports = <2>;
- atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW
+ num-ports = <3>;
+ atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */
+ &pioD 19 GPIO_ACTIVE_LOW
&pioD 20 GPIO_ACTIVE_LOW
>;
};
macb1: ethernet@f802c000 {
phy-mode = "rmii";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: ethernet-phy@1 {
+ interrupt-parent = <&pioE>;
+ interrupts = <30 IRQ_TYPE_EDGE_FALLING>;
+ reg = <1>;
+ };
};
pinctrl@fffff200 {
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&gpio 24 0>; /* PD0 */
+ regulator-always-on;
+ regulator-boot-on;
};
};
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&gpio 170 0>; /* PV2 */
+ regulator-always-on;
+ regulator-boot-on;
};
};
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
+ regulator-always-on;
+ regulator-boot-on;
};
vbus3_reg: regulator@3 {
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
+ regulator-always-on;
+ regulator-boot-on;
};
};
{
return 1 << mpidr_hash.bits;
}
+
+extern int platform_can_cpu_hotplug(void);
+
#endif
" subs %1, %0, %0, ror #16\n"
" addeq %0, %0, %4\n"
" strexeq %2, %0, [%3]"
- : "=&r" (slock), "=&r" (contended), "=r" (res)
+ : "=&r" (slock), "=&r" (contended), "=&r" (res)
: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
: "cc");
} while (res);
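(The "=&r" early clobber on res is needed because strexeq writes res
while the address input %3 is still live; a plain "=r" output would let
the compiler allocate res and &lock->slock to the same register.)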
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp;
+ unsigned long contended, res;
- __asm__ __volatile__(
-" ldrex %0, [%1]\n"
-" teq %0, #0\n"
-" strexeq %0, %2, [%1]"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "cc");
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " teq %0, #0\n"
+ " strexeq %1, %3, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock), "r" (0x80000000)
+ : "cc");
+ } while (res);
- if (tmp == 0) {
+ if (!contended) {
smp_mb();
return 1;
} else {
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp, tmp2 = 1;
+ unsigned long contended, res;
- __asm__ __volatile__(
-" ldrex %0, [%2]\n"
-" adds %0, %0, #1\n"
-" strexpl %1, %0, [%2]\n"
- : "=&r" (tmp), "+r" (tmp2)
- : "r" (&rw->lock)
- : "cc");
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " adds %0, %0, #1\n"
+ " strexpl %1, %0, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock)
+ : "cc");
+ } while (res);
- smp_mb();
- return tmp2 == 0;
+ /* If the lock is negative, then it is already held for write. */
+ if (contended < 0x80000000) {
+ smp_mb();
+ return 1;
+ } else {
+ return 0;
+ }
}
/* read_can_lock - would read_trylock() succeed? */
.endm
.macro kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+ !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
void set_fiq_handler(void *start, unsigned int length)
{
-#if defined(CONFIG_CPU_USE_DOMAINS)
- void *base = (void *)0xffff0000;
-#else
void *base = vectors_page;
-#endif
unsigned offset = FIQ_OFFSET;
memcpy(base + offset, start, length);
+ if (!cache_is_vipt_nonaliasing())
+ flush_icache_range((unsigned long)base + offset,
+ (unsigned long)base + offset + length);
flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
- if (!vectors_high())
- flush_icache_range(offset, offset + length);
}
int claim_fiq(struct fiq_handler *f)
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
+#include <asm/smp_plat.h>
#include <asm/system_misc.h>
extern const unsigned char relocate_new_kernel[];
__be32 header;
int i, err;
+ /*
+ * Validate that if the current HW supports SMP, then the SW supports
+ * and implements CPU hotplug for the current HW. If not, we won't be
+ * able to kexec reliably, so fail the prepare operation.
+ */
+ if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+ return -EINVAL;
+
/*
* No segment at default ATAGs address. try to locate
* a dtb using magic.
unsigned long reboot_code_buffer_phys;
void *reboot_code_buffer;
- if (num_online_cpus() > 1) {
- pr_err("kexec: error: multiple CPUs still online\n");
- return;
- }
+ /*
+ * This can only happen if machine_shutdown() failed to disable some
+ * CPU, and that can only happen if the checks in
+ * machine_kexec_prepare() were not correct. If this fails, we can't
+ * reliably kexec anyway, so BUG_ON is appropriate.
+ */
+ BUG_ON(num_online_cpus() > 1);
page_list = image->head & PAGE_MASK;
int mapping;
if (config >= PERF_COUNT_HW_MAX)
- return -ENOENT;
+ return -EINVAL;
mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
+ if (is_software_event(event))
+ return 1;
+
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
{
return in_gate_area(NULL, addr);
}
-#define is_gate_vma(vma) ((vma) = &gate_vma)
+#define is_gate_vma(vma) ((vma) == &gate_vma)
#else
#define is_gate_vma(vma) 0
#endif
return -ENOSYS;
}
+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ if (smp_ops.cpu_kill)
+ return 1;
+#endif
+
+ return 0;
+}
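machine_kexec_prepare() above uses this check: on hardware with more
than one possible CPU it refuses the kexec load unless the platform
implements cpu_kill, because machine_shutdown() could not reliably take
the secondary CPUs offline otherwise.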
+
#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);
#define access_pmintenclr pm_fake
/* Architected CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
*/
static const struct coproc_reg cp15_regs[] = {
/* CSSELR: swapped by interrupt.S. */
NULL, reset_unknown, c0_CSSELR },
/* TTBR0/TTBR1: swapped by interrupt.S. */
- { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
- { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+ { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+ { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
/* TTBCR: swapped by interrupt.S. */
{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
NULL, reset_unknown, c6_IFAR },
/* PAR swapped by interrupt.S */
- { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
+ { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
/*
* DC{C,I,CI}SW operations:
| KVM_REG_ARM_OPC1_MASK))
return false;
params->is_64bit = true;
- params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+ /* CRm to CRn: see cp15_to_index for details */
+ params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
>> KVM_REG_ARM_CRM_SHIFT);
params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
>> KVM_REG_ARM_OPC1_SHIFT);
params->Op2 = 0;
- params->CRn = 0;
+ params->CRm = 0;
return true;
default:
return false;
if (reg->is_64) {
val |= KVM_REG_SIZE_U64;
val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
- val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+ /*
+ * CRn always denotes the primary coproc. reg. nr. for the
+ * in-kernel representation, but the user space API uses the
+ * CRm for the encoding, because it is modelled after the
+ * MRRC/MCRR instructions: see the ARM ARM rev. c page
+ * B3-1445
+ */
+ val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
} else {
val |= KVM_REG_SIZE_U32;
val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
return -1;
if (i1->CRn != i2->CRn)
return i1->CRn - i2->CRn;
+ if (i1->is_64 != i2->is_64)
+ return i2->is_64 - i1->is_64;
if (i1->CRm != i2->CRm)
return i1->CRm - i2->CRm;
if (i1->Op1 != i2->Op1)
#define CRn(_x) .CRn = _x
#define CRm(_x) .CRm = _x
+#define CRm64(_x) .CRn = _x, .CRm = 0
#define Op1(_x) .Op1 = _x
#define Op2(_x) .Op2 = _x
#define is64 .is_64 = true
/*
* A15-specific CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
*/
static const struct coproc_reg a15_regs[] = {
/* MPIDR: we use VMPIDR for guest access. */
static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_exit_mmio *mmio)
{
- unsigned long rt, len;
+ unsigned long rt;
+ int len;
bool is_write, sign_extend;
if (kvm_vcpu_dabt_isextabt(vcpu)) {
return p;
}
+static bool page_empty(void *ptr)
+{
+ struct page *ptr_page = virt_to_page(ptr);
+ return page_count(ptr_page) == 1;
+}
+
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
pmd_t *pmd_table = pmd_offset(pud, 0);
put_page(virt_to_page(pmd));
}
-static bool pmd_empty(pmd_t *pmd)
-{
- struct page *pmd_page = virt_to_page(pmd);
- return page_count(pmd_page) == 1;
-}
-
static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
if (pte_present(*pte)) {
}
}
-static bool pte_empty(pte_t *pte)
-{
- struct page *pte_page = virt_to_page(pte);
- return page_count(pte_page) == 1;
-}
-
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
unsigned long long start, u64 size)
{
pmd_t *pmd;
pte_t *pte;
unsigned long long addr = start, end = start + size;
- u64 range;
+ u64 next;
while (addr < end) {
pgd = pgdp + pgd_index(addr);
pud = pud_offset(pgd, addr);
if (pud_none(*pud)) {
- addr += PUD_SIZE;
+ addr = pud_addr_end(addr, end);
continue;
}
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
- addr += PMD_SIZE;
+ addr = pmd_addr_end(addr, end);
continue;
}
pte = pte_offset_kernel(pmd, addr);
clear_pte_entry(kvm, pte, addr);
- range = PAGE_SIZE;
+ next = addr + PAGE_SIZE;
/* If we emptied the pte, walk back up the ladder */
- if (pte_empty(pte)) {
+ if (page_empty(pte)) {
clear_pmd_entry(kvm, pmd, addr);
- range = PMD_SIZE;
- if (pmd_empty(pmd)) {
+ next = pmd_addr_end(addr, end);
+ if (page_empty(pmd) && !page_empty(pud)) {
clear_pud_entry(kvm, pud, addr);
- range = PUD_SIZE;
+ next = pud_addr_end(addr, end);
}
}
- addr += range;
+ addr = next;
}
}
CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk),
CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk),
CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk),
+ CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk),
CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk),
CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk),
CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk),
.parts = davinci_nand_partitions,
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
.ecc_mode = NAND_ECC_HW_SYNDROME,
+ .ecc_bits = 4,
.bbt_options = NAND_BBT_USE_FLASH,
};
.parts = davinci_evm_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.bbt_options = NAND_BBT_USE_FLASH,
.timing = &davinci_evm_nandflash_timing,
};
.parts = davinci_nand_partitions,
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.options = 0,
};
.parts = davinci_ntosd2_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.bbt_options = NAND_BBT_USE_FLASH,
};
};
static struct musb_hdrc_platform_data tusb_data = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
.mode = MUSB_OTG,
-#else
- .mode = MUSB_HOST,
-#endif
.set_power = tusb_set_power,
.min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */
.power = 100, /* Max 100 mA VBUS for host mode */
static struct omap_musb_board_data musb_board_data = {
.interface_type = MUSB_INTERFACE_ULPI,
- .mode = MUSB_PERIPHERAL,
+ .mode = MUSB_OTG,
.power = 0,
};
};
static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
.mode = MUSB_OTG,
-#else
- .mode = MUSB_HOST,
-#endif
+
/* .clock is set dynamically */
.config = &musb_config,
static struct mcp251x_platform_data mcp251x_info = {
.oscillator_frequency = 16E6,
- .board_specific_setup = NULL,
- .power_enable = NULL,
- .transceiver_enable = NULL
};
static struct spi_board_info mcp251x_board_info[] = {
#include <linux/i2c/pca953x.h>
#include <linux/apm-emulation.h>
#include <linux/can/platform/mcp251x.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <asm/mach-types.h>
#include <asm/suspend.h>
};
/* CAN bus on SPI */
-static int zeus_mcp2515_setup(struct spi_device *sdev)
-{
- int err;
-
- err = gpio_request(ZEUS_CAN_SHDN_GPIO, "CAN shutdown");
- if (err)
- return err;
+static struct regulator_consumer_supply can_regulator_consumer =
+ REGULATOR_SUPPLY("vdd", "spi3.0");
- err = gpio_direction_output(ZEUS_CAN_SHDN_GPIO, 1);
- if (err) {
- gpio_free(ZEUS_CAN_SHDN_GPIO);
- return err;
- }
+static struct regulator_init_data can_regulator_init_data = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = &can_regulator_consumer,
+ .num_consumer_supplies = 1,
+};
- return 0;
-}
+static struct fixed_voltage_config can_regulator_pdata = {
+ .supply_name = "CAN_SHDN",
+ .microvolts = 3300000,
+ .gpio = ZEUS_CAN_SHDN_GPIO,
+ .init_data = &can_regulator_init_data,
+};
-static int zeus_mcp2515_transceiver_enable(int enable)
-{
- gpio_set_value(ZEUS_CAN_SHDN_GPIO, !enable);
- return 0;
-}
+static struct platform_device can_regulator_device = {
+ .name = "reg-fixed-volage",
+ .id = -1,
+ .dev = {
+ .platform_data = &can_regulator_pdata,
+ },
+};
static struct mcp251x_platform_data zeus_mcp2515_pdata = {
.oscillator_frequency = 16*1000*1000,
- .board_specific_setup = zeus_mcp2515_setup,
- .power_enable = zeus_mcp2515_transceiver_enable,
};
static struct spi_board_info zeus_spi_board_info[] = {
&zeus_leds_device,
&zeus_pcmcia_device,
&zeus_max6369_device,
+ &can_regulator_device,
};
/* AC'97 */
static struct sh_eth_plat_data sh_eth_platdata = {
.phy = 0x00, /* LAN8710A */
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct sh_eth_plat_data ether_platform_data __initdata = {
.phy = 0x01,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_FAST_RCAR,
.phy_interface = PHY_INTERFACE_MODE_RMII,
/*
* Although the LINK signal is available on the board, it's connected to
printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode);
- if (cpu->map_io == NULL || cpu->init == NULL) {
+ if (cpu->init == NULL) {
printk(KERN_ERR "CPU %s support not enabled\n", cpu->name);
panic("Unsupported Samsung CPU");
}
- cpu->map_io();
+ if (cpu->map_io)
+ cpu->map_io();
}
/* s3c24xx_init_clocks
per_cpu(xen_vcpu, cpu) = vcpup;
enable_percpu_irq(xen_events_irq, 0);
+ put_cpu();
}
static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
#define TPIDR_EL1 18 /* Thread ID, Privileged */
#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
+#define PAR_EL1 21 /* Physical Address Register */
/* 32bit specific registers. Keep them at the end of the range */
-#define DACR32_EL2 21 /* Domain Access Control Register */
-#define IFSR32_EL2 22 /* Instruction Fault Status Register */
-#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */
-#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */
-#define TEECR32_EL1 25 /* ThumbEE Configuration Register */
-#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */
-#define NR_SYS_REGS 27
+#define DACR32_EL2 22 /* Domain Access Control Register */
+#define IFSR32_EL2 23 /* Instruction Fault Status Register */
+#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */
+#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */
+#define TEECR32_EL1 26 /* ThumbEE Configuration Register */
+#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */
+#define NR_SYS_REGS 28
/* 32bit mapping */
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
+#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
+#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
struct kvm_mmu_memory_cache mmu_page_cache;
/* Target CPU and feature flags */
- u32 target;
+ int target;
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
/* Detect first run of a vcpu */
static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*event_map)[config];
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+
+ mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu;
+ if (is_software_event(event))
+ return 1;
+
if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
return 1;
mrs x21, tpidr_el1
mrs x22, amair_el1
mrs x23, cntkctl_el1
+ mrs x24, par_el1
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
stp x18, x19, [x3, #112]
stp x20, x21, [x3, #128]
stp x22, x23, [x3, #144]
+ str x24, [x3, #160]
.endm
.macro restore_sysregs
ldp x18, x19, [x3, #112]
ldp x20, x21, [x3, #128]
ldp x22, x23, [x3, #144]
+ ldr x24, [x3, #160]
msr vmpidr_el2, x4
msr csselr_el1, x5
msr tpidr_el1, x21
msr amair_el1, x22
msr cntkctl_el1, x23
+ msr par_el1, x24
.endm
.macro skip_32bit_state tmp, target
// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
+ dsb ishst
+
kern_hyp_va x0
ldr x2, [x0, #KVM_VTTBR]
msr vttbr_el2, x2
ENDPROC(__kvm_tlb_flush_vmid_ipa)
ENTRY(__kvm_flush_vm_context)
+ dsb ishst
tlbi alle1is
ic ialluis
dsb sy
*/
tbnz x1, #7, 1f // S1PTW is set
+ /* Preserve PAR_EL1 */
+ mrs x3, par_el1
+ push x3, xzr
+
/*
* Permission fault, HPFAR_EL2 is invalid.
* Resolve the IPA the hard way using the guest VA.
/* Read result */
mrs x3, par_el1
+ pop x0, xzr // Restore PAR_EL1 from the stack
+ msr par_el1, x0
tbnz x3, #0, 3f // Bail out if we failed the translation
ubfx x3, x3, #12, #36 // Extract IPA
lsl x3, x3, #4 // and present it like HPFAR
/* FAR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
NULL, reset_unknown, FAR_EL1 },
+ /* PAR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
+ NULL, reset_unknown, PAR_EL1 },
/* PMINTENSET_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
#include <asm/machdep.h>
#include <asm/natfeat.h>
+extern long nf_get_id2(const char *feature_name);
+
asm("\n"
-" .global nf_get_id,nf_call\n"
-"nf_get_id:\n"
+" .global nf_get_id2,nf_call\n"
+"nf_get_id2:\n"
" .short 0x7300\n"
" rts\n"
"nf_call:\n"
"1: moveq.l #0,%d0\n"
" rts\n"
" .section __ex_table,\"a\"\n"
-" .long nf_get_id,1b\n"
+" .long nf_get_id2,1b\n"
" .long nf_call,1b\n"
" .previous");
-EXPORT_SYMBOL_GPL(nf_get_id);
EXPORT_SYMBOL_GPL(nf_call);
+long nf_get_id(const char *feature_name)
+{
+ /* feature_name may be in vmalloc()ed memory, so make a copy */
+ char name_copy[32];
+ size_t n;
+
+ n = strlcpy(name_copy, feature_name, sizeof(name_copy));
+ if (n >= sizeof(name_copy))
+ return 0;
+
+ return nf_get_id2(name_copy);
+}
+EXPORT_SYMBOL_GPL(nf_get_id);
+
void nfprint(const char *fmt, ...)
{
static char buf[256];
unsigned long long n64; \
} __n; \
unsigned long __rem, __upper; \
+ unsigned long __base = (base); \
\
__n.n64 = (n); \
if ((__upper = __n.n32[0])) { \
asm ("divul.l %2,%1:%0" \
- : "=d" (__n.n32[0]), "=d" (__upper) \
- : "d" (base), "0" (__n.n32[0])); \
+ : "=d" (__n.n32[0]), "=d" (__upper) \
+ : "d" (__base), "0" (__n.n32[0])); \
} \
asm ("divu.l %2,%1:%0" \
- : "=d" (__n.n32[1]), "=d" (__rem) \
- : "d" (base), "1" (__upper), "0" (__n.n32[1])); \
+ : "=d" (__n.n32[1]), "=d" (__rem) \
+ : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \
(n) = __n.n64; \
__rem; \
})
dec_insn.next_pc_inc;
return 1;
break;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case ldc2_op: /* This is bbit032 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case swc2_op: /* This is bbit1 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case sdc2_op: /* This is bbit132 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+#endif
case cop0_op:
case cop1_op:
case cop2_op:
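(bbit0/bbit1 are Octeon-specific branch-on-bit instructions that reuse
the lwc2/ldc2/swc2/sdc2 opcode space, so the branch emulator computes
their target like any other MIPS branch: delay slot at epc + 4, target
at epc + 4 + (simmediate << 2).)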
struct mm_struct *mm;
struct mmu_table_batch *batch;
unsigned int fullmm;
- unsigned long start, unsigned long end;
+ unsigned long start, end;
};
struct mmu_table_batch {
static struct sh_eth_plat_data sh7763_eth_pdata = {
.phy = 0,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct sh_eth_plat_data sh7757_eth0_pdata = {
.phy = 1,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_FAST_SH4,
.set_mdio_gate = sh7757_eth_set_mdio_gate,
};
static struct sh_eth_plat_data sh7757_eth1_pdata = {
.phy = 1,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_FAST_SH4,
.set_mdio_gate = sh7757_eth_set_mdio_gate,
};
static struct sh_eth_plat_data sh7757_eth_giga0_pdata = {
.phy = 18,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
};
static struct sh_eth_plat_data sh7757_eth_giga1_pdata = {
.phy = 19,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
};
static struct sh_eth_plat_data sh_eth_plat = {
.phy = 0x1f, /* SMSC LAN8700 */
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_FAST_SH4,
.phy_interface = PHY_INTERFACE_MODE_MII,
.ether_link_active_low = 1
};
static struct resource sh_eth_resources[] = {
[0] = {
.start = SH_ETH_ADDR,
- .end = SH_ETH_ADDR + 0x1FC,
+ .end = SH_ETH_ADDR + 0x1FC - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
static struct sh_eth_plat_data sh_eth_plat = {
.phy = 0x1f, /* SMSC LAN8187 */
.edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device sh_eth_device = {
static struct sh_eth_plat_data sh7763_eth_pdata = {
.phy = 1,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/sh_eth.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
},
};
+static struct sh_eth_plat_data eth_platform_data = {
+ .phy = 1,
+ .edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
static struct resource eth_resources[] = {
[0] = {
.start = 0xfb000000,
- .end = 0xfb0001c8,
+ .end = 0xfb0001c7,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "sh7619-ether",
.id = -1,
.dev = {
- .platform_data = (void *)1,
+ .platform_data = &eth_platform_data,
},
.num_resources = ARRAY_SIZE(eth_resources),
.resource = eth_resources,
*/
if (boot_params->sentinel) {
/* fields in boot_params are left uninitialized, clear them */
- memset(&boot_params->olpc_ofw_header, 0,
+ memset(&boot_params->ext_ramdisk_image, 0,
(char *)&boot_params->efi_info -
- (char *)&boot_params->olpc_ofw_header);
+ (char *)&boot_params->ext_ramdisk_image);
memset(&boot_params->kbd_status, 0,
(char *)&boot_params->hdr -
(char *)&boot_params->kbd_status);
extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
extern int apply_microcode_amd(int cpu);
-extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size);
+extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
#ifdef CONFIG_MICROCODE_AMD_EARLY
#ifdef CONFIG_X86_32
static const int amd_erratum_383[];
static const int amd_erratum_400[];
-static bool cpu_has_amd_erratum(const int *erratum);
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
static void init_amd(struct cpuinfo_x86 *c)
{
value &= ~(1ULL << 24);
wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
- if (cpu_has_amd_erratum(amd_erratum_383))
+ if (cpu_has_amd_erratum(c, amd_erratum_383))
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}
- if (cpu_has_amd_erratum(amd_erratum_400))
+ if (cpu_has_amd_erratum(c, amd_erratum_400))
set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
static const int amd_erratum_383[] =
AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
-static bool cpu_has_amd_erratum(const int *erratum)
+
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
- struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
int osvw_id = *erratum++;
u32 range;
u32 ms;
- /*
- * If called early enough that current_cpu_data hasn't been initialized
- * yet, fall back to boot_cpu_data.
- */
- if (cpu->x86 == 0)
- cpu = &boot_cpu_data;
-
- if (cpu->x86_vendor != X86_VENDOR_AMD)
- return false;
-
if (osvw_id >= 0 && osvw_id < 65536 &&
cpu_has(cpu, X86_FEATURE_OSVW)) {
u64 osvw_len;
return 0;
}
-static unsigned int verify_patch_size(int cpu, u32 patch_size,
+static unsigned int verify_patch_size(u8 family, u32 patch_size,
unsigned int size)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 max_size;
#define F1XH_MPB_MAX_SIZE 2048
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458
- switch (c->x86) {
+ switch (family) {
case 0x14:
max_size = F14H_MPB_MAX_SIZE;
break;
* driver cannot continue functioning normally. In such cases, we tear
* down everything we've used up so far and exit.
*/
-static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
+static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_header_amd *mc_hdr;
struct ucode_patch *patch;
unsigned int patch_size, crnt_size, ret;
/* check if patch is for the current family */
proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
- if (proc_fam != c->x86)
+ if (proc_fam != family)
return crnt_size;
if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
return crnt_size;
}
- ret = verify_patch_size(cpu, patch_size, leftover);
+ ret = verify_patch_size(family, patch_size, leftover);
if (!ret) {
pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
return crnt_size;
return crnt_size;
}
-static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size)
+static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
+ size_t size)
{
enum ucode_state ret = UCODE_ERROR;
unsigned int leftover;
}
while (leftover) {
- crnt_size = verify_and_add_patch(cpu, fw, leftover);
+ crnt_size = verify_and_add_patch(family, fw, leftover);
if (crnt_size < 0)
return ret;
return UCODE_OK;
}
-enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size)
+enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
enum ucode_state ret;
/* free old equiv table */
free_equiv_cpu_table();
- ret = __load_microcode_amd(cpu, data, size);
+ ret = __load_microcode_amd(family, data, size);
if (ret != UCODE_OK)
cleanup();
#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
/* save BSP's matching patch for early load */
- if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
- struct ucode_patch *p = find_patch(cpu);
+ if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
+ struct ucode_patch *p = find_patch(smp_processor_id());
if (p) {
memset(amd_bsp_mpb, 0, MPB_MAX_SIZE);
memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data),
goto fw_release;
}
- ret = load_microcode_amd(cpu, fw->data, fw->size);
+ ret = load_microcode_amd(c->x86, fw->data, fw->size);
fw_release:
release_firmware(fw);
uci->cpu_sig.sig = cpuid_eax(0x00000001);
}
#else
-static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
- struct ucode_cpu_info *uci)
+void load_ucode_amd_ap(void)
{
+ unsigned int cpu = smp_processor_id();
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
u32 rev, eax;
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
eax = cpuid_eax(0x00000001);
- uci->cpu_sig.sig = eax;
uci->cpu_sig.rev = rev;
- c->microcode = rev;
- c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
-}
-
-void load_ucode_amd_ap(void)
-{
- unsigned int cpu = smp_processor_id();
-
- collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu);
+ uci->cpu_sig.sig = eax;
if (cpu && !ucode_loaded) {
void *ucode;
return;
ucode = (void *)(initrd_start + ucode_offset);
- if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK)
+ eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+ if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
return;
+
ucode_loaded = true;
}
{
enum ucode_state ret;
void *ucode;
+ u32 eax;
+
#ifdef CONFIG_X86_32
unsigned int bsp = boot_cpu_data.cpu_index;
struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
return 0;
ucode = (void *)(initrd_start + ucode_offset);
- ret = load_microcode_amd(0, ucode, ucode_size);
+ eax = cpuid_eax(0x00000001);
+ eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+
+ ret = load_microcode_amd(eax, ucode, ucode_size);
if (ret != UCODE_OK)
return -EINVAL;
*begin = new_begin;
}
} else {
- *begin = mmap_legacy_base();
+ *begin = current->mm->mmap_legacy_base;
*end = TASK_SIZE;
}
}
* Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
* does, but not when emulating X86_32
*/
-unsigned long mmap_legacy_base(void)
+static unsigned long mmap_legacy_base(void)
{
if (mmap_is_ia32())
return TASK_UNMAPPED_BASE;
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
+ mm->mmap_legacy_base = mmap_legacy_base();
+ mm->mmap_base = mmap_base();
+
if (mmap_is_legacy()) {
- mm->mmap_base = mmap_legacy_base();
+ mm->mmap_base = mm->mmap_legacy_base;
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
- mm->mmap_base = mmap_base();
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
e820_add_region(start, end - start, type);
}
+void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+{
+ struct e820entry *entry;
+ unsigned int i;
+
+ for (i = 0, entry = list; i < map_size; i++, entry++) {
+ if (entry->type == E820_UNUSABLE)
+ entry->type = E820_RAM;
+ }
+}
+
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
}
BUG_ON(rc);
+ /*
+ * Xen won't allow a 1:1 mapping to be created to UNUSABLE
+ * regions, so if we're using the machine memory map leave the
+ * region as RAM as it is in the pseudo-physical map.
+ *
+ * UNUSABLE regions in domUs are not handled and will need
+ * a patch in the future.
+ */
+ if (xen_initial_domain())
+ xen_ignore_unusable(map, memmap.nr_entries);
+
/* Make sure the Xen-supplied memory map is well-ordered. */
sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc;
- rc = native_cpu_up(cpu, tidle);
- WARN_ON (xen_smp_intr_init(cpu));
+ /*
+ * xen_smp_intr_init() needs to run before native_cpu_up()
+ * so that IPI vectors are set up on the booting CPU before
+ * it is marked online in native_cpu_up().
+ */
+ rc = xen_smp_intr_init(cpu);
+ WARN_ON(rc);
+ if (!rc)
+ rc = native_cpu_up(cpu, tidle);
return rc;
}
struct sg_table *sg,
enum dma_data_direction dir)
{
+ struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+
+ mutex_lock(&obj->base.dev->struct_mutex);
+
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
kfree(sg);
+
+ i915_gem_object_unpin_pages(obj);
+
+ mutex_unlock(&obj->base.dev->struct_mutex);
}
static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
u32 power_well_driver;
+ int num_transcoders;
+
struct intel_cursor_error_state {
u32 control;
u32 position;
} cursor[I915_MAX_PIPES];
struct intel_pipe_error_state {
- enum transcoder cpu_transcoder;
- u32 conf;
u32 source;
-
- u32 htotal;
- u32 hblank;
- u32 hsync;
- u32 vtotal;
- u32 vblank;
- u32 vsync;
} pipe[I915_MAX_PIPES];
struct intel_plane_error_state {
u32 surface;
u32 tile_offset;
} plane[I915_MAX_PIPES];
+
+ struct intel_transcoder_error_state {
+ enum transcoder cpu_transcoder;
+
+ u32 conf;
+
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ } transcoder[4];
};
struct intel_display_error_state *
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
- enum transcoder cpu_transcoder;
+ int transcoders[] = {
+ TRANSCODER_A,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP,
+ };
int i;
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return NULL;
+
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
for_each_pipe(i) {
- cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
- error->pipe[i].cpu_transcoder = cpu_transcoder;
-
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
- error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->pipe[i].source = I915_READ(PIPESRC(i));
- error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
- error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
- error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
- error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
- error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
- error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+ }
+
+ error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+ if (HAS_DDI(dev_priv->dev))
+ error->num_transcoders++; /* Account for eDP. */
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ enum transcoder cpu_transcoder = transcoders[i];
+
+ error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+ error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+ error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+ error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+ error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+ error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+ error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+ error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
/* In the code above we read the registers without checking if the power
{
int i;
+ if (!error)
+ return;
+
err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
if (HAS_POWER_WELL(dev))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(i) {
err_printf(m, "Pipe [%d]:\n", i);
- err_printf(m, " CPU transcoder: %c\n",
- transcoder_name(error->pipe[i].cpu_transcoder));
- err_printf(m, " CONF: %08x\n", error->pipe[i].conf);
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
- err_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
- err_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
- err_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
- err_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
- err_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
- err_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
err_printf(m, " POS: %08x\n", error->cursor[i].position);
err_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ err_printf(m, " CPU transcoder: %c\n",
+ transcoder_name(error->transcoder[i].cpu_transcoder));
+ err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
+ err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
+ err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
+ err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
+ err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
+ err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
+ err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
+ }
}
#endif
WREG32(reg, tmp_); \
} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32_PLL(reg); \
return -EINVAL;
}
+ if (bo->tbo.sync_obj) {
+ r = radeon_fence_wait(bo->tbo.sync_obj, false);
+ if (r) {
+ DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+ return r;
+ }
+ }
+
r = radeon_bo_kmap(bo, &ptr);
if (r) {
DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv730_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv730_golden_registers));
radeon_program_register_sequence(rdev,
rv730_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv730_mgcg_init));
break;
case CHIP_RV710:
radeon_program_register_sequence(rdev,
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv710_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv710_golden_registers));
radeon_program_register_sequence(rdev,
rv710_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv710_mgcg_init));
break;
case CHIP_RV740:
radeon_program_register_sequence(rdev,
rv740_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv740_golden_registers));
radeon_program_register_sequence(rdev,
rv740_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv740_mgcg_init));
break;
default:
break;
return r;
}
-static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
+static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
- struct entry *e = hash_lookup(mq, oblock);
+ struct mq_policy *mq = to_mq_policy(p);
+ struct entry *e;
+
+ mutex_lock(&mq->lock);
+
+ e = hash_lookup(mq, oblock);
BUG_ON(!e || !e->in_cache);
del(mq, e);
e->in_cache = false;
push(mq, e);
-}
-static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- remove_mapping(mq, oblock);
mutex_unlock(&mq->lock);
}
dev_mc_add(slave_dev, lacpdu_multicast);
}
- if (vlan_vids_add_by_dev(slave_dev, bond_dev)) {
+ res = vlan_vids_add_by_dev(slave_dev, bond_dev);
+ if (res) {
pr_err("%s: Error: Couldn't add bond vlan ids to %s\n",
bond_dev->name, slave_dev->name);
goto err_close;
if (at91_is_sam9263(priv))
dev->sysfs_groups[0] = &at91_sysfs_attr_group;
- dev_set_drvdata(&pdev->dev, dev);
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
err = register_candev(dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
+ if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0)
dev_info(&pdev->dev, "control memory is not used for raminit\n");
else
priv->raminit = c_can_hw_raminit;
netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
- dev_set_drvdata(&pdev->dev, dev);
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
err = register_flexcandev(dev);
*
* static struct mcp251x_platform_data mcp251x_info = {
* .oscillator_frequency = 8000000,
- * .board_specific_setup = &mcp251x_setup,
- * .power_enable = mcp251x_power_enable,
- * .transceiver_enable = NULL,
* };
*
* static struct spi_board_info spi_board_info[] = {
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
/* SPI interface instruction set */
#define INSTRUCTION_WRITE 0x02
#define AFTER_SUSPEND_POWER 4
#define AFTER_SUSPEND_RESTART 8
int restart_tx;
+ struct regulator *power;
+ struct regulator *transceiver;
};
#define MCP251X_IS(_model) \
return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
}
+static int mcp251x_power_enable(struct regulator *reg, int enable)
+{
+ if (IS_ERR(reg))
+ return 0;
+
+ if (enable)
+ return regulator_enable(reg);
+ else
+ return regulator_disable(reg);
+}
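Because mcp251x_power_enable() succeeds silently when handed an
IS_ERR() value, the "vdd" and "xceiver" supplies remain optional:
boards without a controllable regulator keep the behavior of the old
NULL power_enable/transceiver_enable callbacks.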
+
static void mcp251x_open_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
free_irq(spi->irq, priv);
mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
+ mcp251x_power_enable(priv->transceiver, 0);
close_candev(net);
}
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
close_candev(net);
mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
+ mcp251x_power_enable(priv->transceiver, 0);
priv->can.state = CAN_STATE_STOPPED;
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
- unsigned long flags;
+ unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING;
int ret;
ret = open_candev(net);
}
mutex_lock(&priv->mcp_lock);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(1);
+ mcp251x_power_enable(priv->transceiver, 1);
priv->force_quit = 0;
priv->tx_skb = NULL;
priv->tx_len = 0;
- flags = IRQF_ONESHOT;
- if (pdata->irq_flags)
- flags |= pdata->irq_flags;
- else
- flags |= IRQF_TRIGGER_FALLING;
-
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
flags, DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
+ mcp251x_power_enable(priv->transceiver, 0);
close_candev(net);
goto open_unlock;
}
CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
priv->model = spi_get_device_id(spi)->driver_data;
priv->net = net;
+
+ priv->power = devm_regulator_get(&spi->dev, "vdd");
+ priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
+ if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
+ (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
+ ret = -EPROBE_DEFER;
+ goto error_power;
+ }
+
+ ret = mcp251x_power_enable(priv->power, 1);
+ if (ret)
+ goto error_power;
+
spi_set_drvdata(spi, priv);
priv->spi = spi;
}
}
- if (pdata->power_enable)
- pdata->power_enable(1);
-
- /* Call out to platform specific setup */
- if (pdata->board_specific_setup)
- pdata->board_specific_setup(spi);
-
SET_NETDEV_DEV(net, &spi->dev);
/* Configure the SPI bus */
- spi->mode = SPI_MODE_0;
+ spi->mode = spi->mode ? : SPI_MODE_0;
+ if (mcp251x_is_2510(spi))
+ spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
+ else
+ spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
spi->bits_per_word = 8;
spi_setup(spi);
/* Here is OK to not lock the MCP, no one knows about it yet */
if (!mcp251x_hw_probe(spi)) {
- dev_info(&spi->dev, "Probe failed\n");
+ ret = -ENODEV;
goto error_probe;
}
mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
-
ret = register_candev(net);
if (ret)
goto error_probe;
if (!mcp251x_enable_dma)
kfree(priv->spi_tx_buf);
error_tx_buf:
- free_candev(net);
if (mcp251x_enable_dma)
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
+ mcp251x_power_enable(priv->power, 0);
+error_power:
+ free_candev(net);
error_alloc:
- if (pdata->power_enable)
- pdata->power_enable(0);
dev_err(&spi->dev, "probe failed\n");
error_out:
return ret;
static int mcp251x_can_remove(struct spi_device *spi)
{
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
unregister_candev(net);
- free_candev(net);
if (mcp251x_enable_dma) {
dma_free_coherent(&spi->dev, PAGE_SIZE,
kfree(priv->spi_rx_buf);
}
- if (pdata->power_enable)
- pdata->power_enable(0);
+ mcp251x_power_enable(priv->power, 0);
+
+ free_candev(net);
return 0;
}
static int mcp251x_can_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
netif_device_detach(net);
mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
+ mcp251x_power_enable(priv->transceiver, 0);
priv->after_suspend = AFTER_SUSPEND_UP;
} else {
priv->after_suspend = AFTER_SUSPEND_DOWN;
}
- if (pdata->power_enable) {
- pdata->power_enable(0);
+ if (!IS_ERR(priv->power)) {
+ regulator_disable(priv->power);
priv->after_suspend |= AFTER_SUSPEND_POWER;
}
static int mcp251x_can_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = spi_get_drvdata(spi);
if (priv->after_suspend & AFTER_SUSPEND_POWER) {
- pdata->power_enable(1);
+ mcp251x_power_enable(priv->power, 1);
queue_work(priv->wq, &priv->restart_work);
} else {
if (priv->after_suspend & AFTER_SUSPEND_UP) {
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(1);
+ mcp251x_power_enable(priv->transceiver, 1);
queue_work(priv->wq, &priv->restart_work);
} else {
priv->after_suspend = 0;
if (!bcm_enet_shared_base[0])
return -ENODEV;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
- if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
+ if (!res_irq || !res_irq_rx || !res_irq_tx)
return -ENODEV;
ret = 0;
if (ret)
goto out;
- priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
- if (priv->base == NULL) {
- ret = -ENOMEM;
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
goto out;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- platform_set_drvdata(pdev, NULL);
free_netdev(dev);
return 0;
}
BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
BNX2X_SP_RTNL_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_SP_RTNL_TX_STOP,
+ BNX2X_SP_RTNL_TX_RESUME,
};
struct bnx2x_prev_path_list {
#include "bnx2x_dcb.h"
/* forward declarations of dcbx related functions */
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
u32 *set_configuration_ets_pg,
u32 *pri_pg_tbl);
bnx2x_pfc_clear(bp);
}
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
{
struct bnx2x_func_state_params func_params = {NULL};
+ int rc;
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_STOP;
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
- return bnx2x_func_state_change(bp, &func_params);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to hold traffic for HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
}
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
{
struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_tx_start_params *tx_params =
&func_params.params.tx_start;
+ int rc;
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_START;
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
bnx2x_dcbx_fw_struct(bp, tx_params);
DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
- return bnx2x_func_state_change(bp, &func_params);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to resume traffic after HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
}
static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
if (IS_MF(bp))
bnx2x_link_sync_notify(bp);
- bnx2x_dcbx_stop_hw_tx(bp);
+ set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
+
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
return;
}
/* ets may affect cmng configuration: reinit it in hw */
bnx2x_set_local_cmng(bp);
- bnx2x_dcbx_resume_hw_tx(bp);
+ set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state);
+
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
case DCB_FEATCFG_ATTR_PG:
if (bp->dcbx_local_feat.ets.enabled)
*flags |= DCB_FEATCFG_ENABLE;
- if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
+ if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
case DCB_FEATCFG_ATTR_PFC:
if (bp->dcbx_local_feat.pfc.enabled)
*flags |= DCB_FEATCFG_ENABLE;
if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
- DCBX_LOCAL_PFC_MISMATCH))
+ DCBX_LOCAL_PFC_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
case DCB_FEATCFG_ATTR_APP:
if (bp->dcbx_local_feat.app.enabled)
*flags |= DCB_FEATCFG_ENABLE;
if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
- DCBX_LOCAL_APP_MISMATCH))
+ DCBX_LOCAL_APP_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
default:
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
#endif /* BCM_DCBNL */
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+
#endif /* BNX2X_DCB_H */
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
}
+static void bnx2x_init_dropless_fc(struct bnx2x *bp)
+{
+ u32 pause_enabled = 0;
+
+ if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_enabled = 1;
+
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
+ pause_enabled);
+ }
+
+ DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
+ pause_enabled ? "enabled" : "disabled");
+}
+
int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
bnx2x_release_phy_lock(bp);
+ bnx2x_init_dropless_fc(bp);
+
bnx2x_calc_fc_adv(bp);
if (bp->link_vars.link_up) {
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
+ bnx2x_init_dropless_fc(bp);
+
bnx2x_calc_fc_adv(bp);
} else
BNX2X_ERR("Bootcode is missing - can not set link\n");
bnx2x_link_update(&bp->link_params, &bp->link_vars);
- if (bp->link_vars.link_up) {
+ bnx2x_init_dropless_fc(bp);
- /* dropless flow control */
- if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
- int port = BP_PORT(bp);
- u32 pause_enabled = 0;
-
- if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
- pause_enabled = 1;
-
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
- pause_enabled);
- }
+ if (bp->link_vars.link_up) {
if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
struct host_port_stats *pstats;
&bp->sp_rtnl_state))
bnx2x_pf_set_vfs_vlan(bp);
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state))
+ bnx2x_dcbx_stop_hw_tx(bp);
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))
+ bnx2x_dcbx_resume_hw_tx(bp);
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
int tmp;
u32 cfg;
+ if (IS_VF(bp))
+ return 0;
+
if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
/* Take function: tmp = func */
tmp = BP_ABS_FUNC(bp);
void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
- DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
- if (!IS_SRIOV(bp))
- return;
-
- REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
+ REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
pci_disable_sriov(bp->pdev);
}
-static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
- struct bnx2x_virtf *vf)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin)
{
if (bp->state != BNX2X_STATE_OPEN) {
BNX2X_ERR("vf ndo called though PF is down\n");
return -EINVAL;
}
- if (!vf) {
+ /* init members */
+ *vf = BP_VF(bp, vfidx);
+ *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ if (!*vf) {
BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
vfidx);
return -EINVAL;
}
+ if (!*bulletin) {
+ BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
return 0;
}
struct ifla_vf_info *ivi)
{
struct bnx2x *bp = netdev_priv(dev);
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
- struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x_vlan_mac_obj *mac_obj;
+ struct bnx2x_vlan_mac_obj *vlan_obj;
int rc;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
- if (!mac_obj || !vlan_obj || !bulletin) {
+ mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+ vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+ if (!mac_obj || !vlan_obj) {
BNX2X_ERR("VF partially initialized\n");
return -EINVAL;
}
{
struct bnx2x *bp = netdev_priv(dev);
int rc, q_logical_state;
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
if (!is_valid_ether_addr(mac)) {
{
struct bnx2x *bp = netdev_priv(dev);
int rc, q_logical_state;
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>
int macb_mii_init(struct macb *bp)
{
struct macb_platform_data *pdata;
+ struct device_node *np;
int err = -ENXIO, i;
/* Enable management port */
bp->mii_bus->parent = &bp->dev->dev;
pdata = bp->pdev->dev.platform_data;
- if (pdata)
- bp->mii_bus->phy_mask = pdata->phy_mask;
-
bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (!bp->mii_bus->irq) {
err = -ENOMEM;
goto err_out_free_mdiobus;
}
- for (i = 0; i < PHY_MAX_ADDR; i++)
- bp->mii_bus->irq[i] = PHY_POLL;
-
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
- if (mdiobus_register(bp->mii_bus))
+ np = bp->pdev->dev.of_node;
+ if (np) {
+ /* try DT PHY registration */
+ err = of_mdiobus_register(bp->mii_bus, np);
+
+ /* fall back to standard PHY registration if no PHY was
+ * found during DT PHY registration
+ */
+ if (!err && !phy_find_first(bp->mii_bus)) {
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan(bp->mii_bus, i);
+ if (IS_ERR(phydev)) {
+ err = PTR_ERR(phydev);
+ break;
+ }
+ }
+
+ if (err)
+ goto err_out_unregister_bus;
+ }
+ } else {
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bp->mii_bus->irq[i] = PHY_POLL;
+
+ if (pdata)
+ bp->mii_bus->phy_mask = pdata->phy_mask;
+
+ err = mdiobus_register(bp->mii_bus);
+ }
+
+ if (err)
goto err_out_free_mdio_irq;
if (macb_mii_probe(bp->dev) != 0) {
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
- enic_ethtool.o
+ enic_ethtool.o enic_api.o
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.39"
-#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
+#define DRV_VERSION "2.1.1.43"
+#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
#ifdef CONFIG_PCI_IOV
u16 num_vfs;
#endif
+ spinlock_t enic_api_lock;
struct enic_port_profile *pp;
/* work queue cache line section */
--- /dev/null
+/*
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include "vnic_dev.h"
+#include "vnic_devcmd.h"
+
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_api.h"
+
+int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+{
+ int err;
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_dev *vdev = enic->vdev;
+
+ spin_lock(&enic->enic_api_lock);
+ spin_lock(&enic->devcmd_lock);
+
+ vnic_dev_cmd_proxy_by_index_start(vdev, vf);
+ err = vnic_dev_cmd(vdev, cmd, a0, a1, wait);
+ vnic_dev_cmd_proxy_end(vdev);
+
+ spin_unlock(&enic->devcmd_lock);
+ spin_unlock(&enic->enic_api_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(enic_api_devcmd_proxy_by_index);
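+
+/* A minimal usage sketch (the wait value of 1000 is an assumption for
+ * illustration): enable RQ/WQ queue pair 0 on VF 0 through the proxy,
+ * per the CMD_QP_ENABLE contract in vnic_devcmd.h.
+ *
+ *	u64 a0 = 0;
+ *	u64 a1 = CMD_QP_RQWQ;
+ *	int err = enic_api_devcmd_proxy_by_index(netdev, 0, CMD_QP_ENABLE,
+ *						 &a0, &a1, 1000);
+ */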
--- /dev/null
+/*
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __ENIC_API_H__
+#define __ENIC_API_H__
+
+#include <linux/netdevice.h>
+
+#include "vnic_dev.h"
+#include "vnic_devcmd.h"
+
+int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait);
+
+#endif
rtnl_lock();
+ spin_lock(&enic->enic_api_lock);
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev);
+ spin_unlock(&enic->enic_api_lock);
+ call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock();
}
*/
spin_lock_init(&enic->devcmd_lock);
+ spin_lock_init(&enic->enic_api_lock);
/*
* Set ingress vlan rewrite mode before vnic initialization
int offload_mode, int cq_entry, int sop, int eop, int loopback)
{
struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+ u8 desc_skip_cnt = 1;
+ u8 compressed_send = 0;
+ u64 wrid = 0;
wq_enet_desc_enc(desc,
(u64)dma_addr | VNIC_PADDR_TARGET,
(u16)vlan_tag,
(u8)loopback);
- vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
+ vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
+ (u8)cq_entry, compressed_send, wrid);
}
static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
dma_addr_t dma_addr, unsigned int len)
{
struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+ u64 wrid = 0;
u8 type = os_buf_index ?
RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
(u64)dma_addr | VNIC_PADDR_TARGET,
type, (u16)len);
- vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
+ vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
}
struct enic;
* 0 if no VIF-CONFIG-INFO TLV was ever received. */
CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+ /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u32)a1=INT13_CMD_xxx
+ */
+ CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+ /* Set default vlan:
+ * in: (u16)a0=new default vlan
+ * (u16)a1=zero for overriding vlan with param a0,
+ * non-zero for resetting vlan to the default
+ * out: (u16)a0=old default vlan
+ */
+ CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46),
+
/* init_prov_info2:
* Variant of CMD_INIT_PROV_INFO, where it will not try to enable
* the vnic until CMD_ENABLE2 is issued.
* (u64)a0=paddr of vnic_devcmd_provinfo
- * (u32)a1=sizeof provision info */
+ * (u32)a1=sizeof provision info
+ */
CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
/* enable2:
CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
/*
- * cmd_set_mac_addr
- * set mac address
+ * Set the predefined mac address as default
* in:
* (u48)a0 = mac addr
- *
*/
CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
+
+ /* Update the provisioning info of the given VIF
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info
+ */
+ CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+
+ /* Add a filter.
+ * in: (u64) a0= filter address
+ * (u32) a1= size of filter
+ * out: (u32) a0=filter identifier
+ */
+ CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
+
+ /* Delete a filter.
+ * in: (u32) a0=filter identifier
+ */
+ CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59),
+
+ /* Enable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60),
+
+ /* Disable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61),
+
+ /* Stats dump Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u64) a1=host buffer addr for status dump
+ * (u32) a2=length of the buffer
+ */
+ CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62),
+
+ /* Clear stats for Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ */
+ CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
};
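+/* Worked reading of the CMD_SET_DEFAULT_VLAN contract above: issuing the
+ * command with a0 = 100 and a1 = 0 overrides the default vlan with 100
+ * and returns the previous default vlan in a0; issuing it again with a
+ * non-zero a1 resets the vlan back to the device default.
+ */
+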
/* CMD_ENABLE2 flags */
+#define CMD_ENABLE2_STANDBY 0x0
#define CMD_ENABLE2_ACTIVE 0x1
/* flags for CMD_OPEN */
#define CMD_PFILTER_PROMISCUOUS 0x08
#define CMD_PFILTER_ALL_MULTICAST 0x10
+/* Commands for CMD_QP_ENABLE/CMD_QP_DISABLE */
+#define CMD_QP_RQWQ 0x0
+
/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0
#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1
ERR_EMAXRES = 10,
ERR_ENOTSUPPORTED = 11,
ERR_EINPROGRESS = 12,
+ ERR_MAX
};
/*
u8 data[0];
};
+/* These are used in the flags field of the different filters to denote
+ * which fields are valid.
+ */
+#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
+
+#define FILTER_FIELDS_USNIC ( \
+ FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2) | \
+ FILTER_FIELD_VALID(3) | \
+ FILTER_FIELD_VALID(4))
+
+#define FILTER_FIELDS_IPV4_5TUPLE ( \
+ FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2) | \
+ FILTER_FIELD_VALID(3) | \
+ FILTER_FIELD_VALID(4) | \
+ FILTER_FIELD_VALID(5))
+
+#define FILTER_FIELDS_MAC_VLAN ( \
+ FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2))
+
+#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4)
+
+struct filter_usnic_id {
+ u32 flags;
+ u16 vlan;
+ u16 ethtype;
+ u8 proto_version;
+ u32 usnic_id;
+} __packed;
+
+#define FILTER_FIELD_5TUP_PROTO FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
+
+/* Enums for the protocol field. */
+enum protocol_e {
+ PROTO_UDP = 0,
+ PROTO_TCP = 1,
+};
+
+struct filter_ipv4_5tuple {
+ u32 flags;
+ u32 protocol;
+ u32 src_addr;
+ u32 dst_addr;
+ u16 src_port;
+ u16 dst_port;
+} __packed;
+
+#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2)
+
+struct filter_mac_vlan {
+ u32 flags;
+ u16 vlan;
+ u8 mac_addr[6];
+} __packed;
+
+/* Specifies the filter_action type. */
+enum {
+ FILTER_ACTION_RQ_STEERING = 0,
+ FILTER_ACTION_MAX
+};
+
+struct filter_action {
+ u32 type;
+ union {
+ u32 rq_idx;
+ } u;
+} __packed;
+
+/* Specifies the filter type. */
+enum filter_type {
+ FILTER_USNIC_ID = 0,
+ FILTER_IPV4_5TUPLE = 1,
+ FILTER_MAC_VLAN = 2,
+ FILTER_MAX
+};
+
+struct filter {
+ u32 type;
+ union {
+ struct filter_usnic_id usnic;
+ struct filter_ipv4_5tuple ipv4;
+ struct filter_mac_vlan mac_vlan;
+ } u;
+} __packed;
+
+enum {
+ CLSF_TLV_FILTER = 0,
+ CLSF_TLV_ACTION = 1,
+};
+
+/* Maximum size of buffer to CMD_ADD_FILTER */
+#define FILTER_MAX_BUF_SIZE 100
+
+struct filter_tlv {
+ u32 type;
+ u32 length;
+ u32 val[0];
+};
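+
+/* A sketch of a CMD_ADD_FILTER buffer, assuming the host packs two TLVs
+ * back to back (total size must not exceed FILTER_MAX_BUF_SIZE):
+ *
+ *	struct filter_tlv *tlv = buf;
+ *	tlv->type = CLSF_TLV_FILTER;
+ *	tlv->length = sizeof(struct filter);
+ *	memcpy(tlv->val, &fltr, sizeof(struct filter));
+ *	tlv = (struct filter_tlv *)((char *)tlv->val + tlv->length);
+ *	tlv->type = CLSF_TLV_ACTION;
+ *	tlv->length = sizeof(struct filter_action);
+ *	memcpy(tlv->val, &action, sizeof(struct filter_action));
+ *
+ * The buffer's paddr goes in a0 and its total size in a1; the filter id
+ * comes back in a0 and can later be passed to CMD_DEL_FILTER.
+ */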
+
/*
* Writing cmd register causes STAT_BUSY to get set in status register.
* When cmd completes, STAT_BUSY will be cleared.
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
struct vnic_rq_buf *buf;
- struct vnic_dev *vdev;
unsigned int i, j, count = rq->ring.desc_count;
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
- vdev = rq->vdev;
-
for (i = 0; i < blks; i++) {
rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
if (!rq->bufs[i])
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
- u32 fetch_index;
+ u32 fetch_index = 0;
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
unsigned int len;
unsigned int index;
void *desc;
+ uint64_t wr_id;
};
struct vnic_rq {
static inline void vnic_rq_post(struct vnic_rq *rq,
void *os_buf, unsigned int os_buf_index,
- dma_addr_t dma_addr, unsigned int len)
+ dma_addr_t dma_addr, unsigned int len,
+ uint64_t wrid)
{
struct vnic_rq_buf *buf = rq->to_use;
buf->os_buf_index = os_buf_index;
buf->dma_addr = dma_addr;
buf->len = len;
+ buf->wr_id = wrid;
buf = buf->next;
rq->to_use = buf;
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
struct vnic_wq_buf *buf;
- struct vnic_dev *vdev;
unsigned int i, j, count = wq->ring.desc_count;
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
- vdev = wq->vdev;
-
for (i = 0; i < blks; i++) {
wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
if (!wq->bufs[i])
unsigned int index;
int sop;
void *desc;
+ uint64_t wr_id; /* Cookie */
+ uint8_t cq_entry; /* Gets completion event from hw */
+ uint8_t desc_skip_cnt; /* Num descs to occupy */
+ uint8_t compressed_send; /* Both hdr and payload in one desc */
};
/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
static inline void vnic_wq_post(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr,
- unsigned int len, int sop, int eop)
+ unsigned int len, int sop, int eop,
+ uint8_t desc_skip_cnt, uint8_t cq_entry,
+ uint8_t compressed_send, uint64_t wrid)
{
struct vnic_wq_buf *buf = wq->to_use;
buf->sop = sop;
+ buf->cq_entry = cq_entry;
+ buf->compressed_send = compressed_send;
+ buf->desc_skip_cnt = desc_skip_cnt;
buf->os_buf = eop ? os_buf : NULL;
buf->dma_addr = dma_addr;
buf->len = len;
+ buf->wr_id = wrid;
buf = buf->next;
if (eop) {
}
wq->to_use = buf;
- wq->ring.desc_avail--;
+ wq->ring.desc_avail -= desc_skip_cnt;
}
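+
+/* Note: desc_avail is now charged per descriptor rather than per call;
+ * a plain single-descriptor post, as issued by the enic queueing helpers
+ * above with desc_skip_cnt = 1, keeps the old accounting.
+ */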
static inline void vnic_wq_service(struct vnic_wq *wq,
}
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sundance_poll_controller(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ disable_irq(np->pci_dev->irq);
+ intr_handler(np->pci_dev->irq, dev);
+ enable_irq(np->pci_dev->irq);
+}
+#endif
+
static const struct net_device_ops netdev_ops = {
.ndo_open = netdev_open,
.ndo_stop = netdev_close,
.ndo_change_mtu = change_mtu,
.ndo_set_mac_address = sundance_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sundance_poll_controller,
+#endif
};
static int sundance_probe1(struct pci_dev *pdev,
/* Wait for all pending tx completions to arrive so that
* all tx skbs are freed.
*/
- be_tx_compl_clean(adapter);
netif_tx_disable(netdev);
+ be_tx_compl_clean(adapter);
be_rx_qs_destroy(adapter);
static int mpc52xx_fec_mdio_remove(struct platform_device *of)
{
- struct device *dev = &of->dev;
- struct mii_bus *bus = dev_get_drvdata(dev);
+ struct mii_bus *bus = platform_get_drvdata(of);
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
mdiobus_unregister(bus);
- dev_set_drvdata(dev, NULL);
iounmap(priv->regs);
kfree(priv);
mdiobus_free(bus);
struct sk_buff *skb)
{
struct sk_buff *new_skb;
- struct fs_enet_private *fep = netdev_priv(dev);
/* Alloc new skb */
new_skb = netdev_alloc_skb(dev, skb->len + 4);
struct fs_enet_private *fep;
struct fs_platform_info *fpi;
const u32 *data;
+ struct clk *clk;
+ int err;
const u8 *mac_addr;
const char *phy_connection_type;
int privsize, len, ret = -ENODEV;
fpi->use_rmii = 1;
}
+ /* Make clock lookup non-fatal (the driver is shared among platforms),
+ * but require enable to succeed when a clock was specified/found.
+ * Keep a reference to the clock upon successful acquisition.
+ */
+ clk = devm_clk_get(&ofdev->dev, "per");
+ if (!IS_ERR(clk)) {
+ err = clk_prepare_enable(clk);
+ if (err) {
+ ret = err;
+ goto out_free_fpi;
+ }
+ fpi->clk_per = clk;
+ }
+
privsize = sizeof(*fep) +
sizeof(struct sk_buff **) *
(fpi->rx_ring + fpi->tx_ring);
free_netdev(ndev);
out_put:
of_node_put(fpi->phy_node);
+ if (fpi->clk_per)
+ clk_disable_unprepare(fpi->clk_per);
out_free_fpi:
kfree(fpi);
return ret;
fep->ops->cleanup_data(ndev);
dev_set_drvdata(fep->dev, NULL);
of_node_put(fep->fpi->phy_node);
+ if (fep->fpi->clk_per)
+ clk_disable_unprepare(fep->fpi->clk_per);
free_netdev(ndev);
return 0;
}
priv->regs = priv->map + data->mii_offset;
new_bus->parent = &pdev->dev;
- dev_set_drvdata(&pdev->dev, new_bus);
+ platform_set_drvdata(pdev, new_bus);
if (data->get_tbipa) {
for_each_child_of_node(np, tbi) {
mdiobus_unregister(bus);
- dev_set_drvdata(device, NULL);
-
iounmap(priv->map);
mdiobus_free(bus);
static int ucc_geth_remove(struct platform_device* ofdev)
{
- struct device *device = &ofdev->dev;
- struct net_device *dev = dev_get_drvdata(device);
+ struct net_device *dev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(dev);
unregister_netdev(dev);
free_netdev(dev);
ucc_geth_memclean(ugeth);
- dev_set_drvdata(device, NULL);
return 0;
}
DBG(dev, "remove" NL);
- dev_set_drvdata(&ofdev->dev, NULL);
-
unregister_netdev(dev->ndev);
cancel_work_sync(&dev->reset_work);
ret_val = 13; /* ret_val is the same as mis-compare */
break;
}
- if (jiffies >= (time + 20)) {
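+ /* time_after() handles jiffies wraparound, unlike a raw compare */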
+ if (time_after(jiffies, time + 20)) {
ret_val = 14; /* error code for time out error */
break;
}
#define MAX_PS_BUFFERS 4
/* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
/* Receive Descriptor - Packet Split */
union e1000_rx_desc_packet_split {
struct {
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
-
static const struct e1000_info *e1000_info_tbl[] = {
[board_82571] = &e1000_82571_info,
[board_82572] = &e1000_82572_info,
* correctable error when the MAC transitions from D0 to D3. To
* prevent this we need to mask off the correctable errors on the
* downstream port of the pci-e switch.
+ *
+ * The associated upstream bridge may not be visible when the PCI
+ * device is assigned to a guest; KVM on Power is one such case.
*/
if (adapter->flags & FLAG_IS_QUAD_PORT) {
struct pci_dev *us_dev = pdev->bus->self;
u16 devctl;
+ if (!us_dev)
+ return 0;
+
pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
(devctl & ~PCI_EXP_DEVCTL_CERE));
return 0;
}
-#ifdef CONFIG_PCIEASPM
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+/**
+ * e1000e_disable_aspm - Disable ASPM states
+ * @pdev: pointer to PCI device struct
+ * @state: bit-mask of ASPM states to disable
+ *
+ * Some devices *must* have certain ASPM states disabled per hardware errata.
+ **/
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
+ struct pci_dev *parent = pdev->bus->self;
+ u16 aspm_dis_mask = 0;
+ u16 pdev_aspmc, parent_aspmc;
+
+ switch (state) {
+ case PCIE_LINK_STATE_L0S:
+ case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
+ /* fall-through - can't have L1 without L0s */
+ case PCIE_LINK_STATE_L1:
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
+ break;
+ default:
+ return;
+ }
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+ if (parent) {
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+ &parent_aspmc);
+ parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+ }
+
+ /* Nothing to do if the ASPM states to be disabled already are */
+ if (!(pdev_aspmc & aspm_dis_mask) &&
+ (!parent || !(parent_aspmc & aspm_dis_mask)))
+ return;
+
+ dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+ (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
+ "L0s" : "",
+ (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
+ "L1" : "");
+
+#ifdef CONFIG_PCIEASPM
pci_disable_link_state_locked(pdev, state);
-}
-#else
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
-{
- u16 aspm_ctl = 0;
- if (state & PCIE_LINK_STATE_L0S)
- aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S;
- if (state & PCIE_LINK_STATE_L1)
- aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1;
+ /* Double-check ASPM control. If not disabled by the above, the
+ * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
+ * not enabled); override by writing PCI config space directly.
+ */
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+ if (!(aspm_dis_mask & pdev_aspmc))
+ return;
+#endif
/* Both device and parent should have the same ASPM setting.
* Disable ASPM in downstream component first and then upstream.
*/
- pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl);
-
- if (pdev->bus->self)
- pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
- aspm_ctl);
-}
-#endif
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
-{
- dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
- (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
- (state & PCIE_LINK_STATE_L1) ? "L1" : "");
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
- __e1000e_disable_aspm(pdev, state);
+ if (parent)
+ pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+ aspm_dis_mask);
}
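+
+/* Callers pass a mask of PCIE_LINK_STATE_* bits; e.g. a device whose
+ * errata forbids both states would use:
+ *
+ *	e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ */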
#ifdef CONFIG_PM
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
+
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size = 15;
nvm->word_size = 1 << size;
- if (hw->mac.type < e1000_i210) {
- nvm->opcode_bits = 8;
- nvm->delay_usec = 1;
-
- switch (nvm->override) {
- case e1000_nvm_override_spi_large:
- nvm->page_size = 32;
- nvm->address_bits = 16;
- break;
- case e1000_nvm_override_spi_small:
- nvm->page_size = 8;
- nvm->address_bits = 8;
- break;
- default:
- nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
- nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
- 16 : 8;
- break;
- }
- if (nvm->word_size == (1 << 15))
- nvm->page_size = 128;
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
- nvm->type = e1000_nvm_eeprom_spi;
- } else {
- nvm->type = e1000_nvm_flash_hw;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+ 16 : 8;
+ break;
}
+ if (nvm->word_size == (1 << 15))
+ nvm->page_size = 128;
+
+ nvm->type = e1000_nvm_eeprom_spi;
/* NVM Function Pointers */
+ nvm->ops.acquire = igb_acquire_nvm_82575;
+ nvm->ops.release = igb_release_nvm_82575;
+ nvm->ops.write = igb_write_nvm_spi;
+ nvm->ops.validate = igb_validate_nvm_checksum;
+ nvm->ops.update = igb_update_nvm_checksum;
+ if (nvm->word_size < (1 << 15))
+ nvm->ops.read = igb_read_nvm_eerd;
+ else
+ nvm->ops.read = igb_read_nvm_spi;
+
+ /* override generic family function pointers for specific descendants */
switch (hw->mac.type) {
case e1000_82580:
nvm->ops.validate = igb_validate_nvm_checksum_82580;
nvm->ops.update = igb_update_nvm_checksum_82580;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
break;
case e1000_i354:
case e1000_i350:
nvm->ops.validate = igb_validate_nvm_checksum_i350;
nvm->ops.update = igb_update_nvm_checksum_i350;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
- break;
- case e1000_i210:
- nvm->ops.validate = igb_validate_nvm_checksum_i210;
- nvm->ops.update = igb_update_nvm_checksum_i210;
- nvm->ops.acquire = igb_acquire_nvm_i210;
- nvm->ops.release = igb_release_nvm_i210;
- nvm->ops.read = igb_read_nvm_srrd_i210;
- nvm->ops.write = igb_write_nvm_srwr_i210;
- nvm->ops.valid_led_default = igb_valid_led_default_i210;
- break;
- case e1000_i211:
- nvm->ops.acquire = igb_acquire_nvm_i210;
- nvm->ops.release = igb_release_nvm_i210;
- nvm->ops.read = igb_read_nvm_i211;
- nvm->ops.valid_led_default = igb_valid_led_default_i210;
- nvm->ops.validate = NULL;
- nvm->ops.update = NULL;
- nvm->ops.write = NULL;
break;
default:
- nvm->ops.validate = igb_validate_nvm_checksum;
- nvm->ops.update = igb_update_nvm_checksum;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
break;
}
case E1000_DEV_ID_I210_FIBER:
case E1000_DEV_ID_I210_SERDES:
case E1000_DEV_ID_I210_SGMII:
+ case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+ case E1000_DEV_ID_I210_SERDES_FLASHLESS:
mac->type = e1000_i210;
break;
case E1000_DEV_ID_I211_COPPER:
/* NVM initialization */
ret_val = igb_init_nvm_params_82575(hw);
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ ret_val = igb_init_nvm_params_i210(hw);
+ break;
+ default:
+ break;
+ }
+
if (ret_val)
goto out;
**/
static s32 igb_reset_hw_82575(struct e1000_hw *hw)
{
- u32 ctrl, icr;
+ u32 ctrl;
s32 ret_val;
/* Prevent the PCI-E bus from sticking if there is no TLP connection
/* Clear any pending interrupt events. */
wr32(E1000_IMC, 0xffffffff);
- icr = rd32(E1000_ICR);
+ rd32(E1000_ICR);
/* Install any alternate MAC address into RAR0 */
ret_val = igb_check_alt_mac_addr(hw);
s32 ret_val = 0;
/* BH SW mailbox bit in SW_FW_SYNC */
u16 swmbsw_mask = E1000_SW_SYNCH_MB;
- u32 ctrl, icr;
+ u32 ctrl;
bool global_device_reset = hw->dev_spec._82575.global_device_reset;
-
hw->dev_spec._82575.global_device_reset = false;
/* due to hw errata, global device reset doesn't always
/* Clear any pending interrupt events. */
wr32(E1000_IMC, 0xffffffff);
- icr = rd32(E1000_ICR);
+ rd32(E1000_ICR);
ret_val = igb_reset_mdicnfg_82580(hw);
if (ret_val)
#define E1000_EECD_SIZE_EX_SHIFT 11
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
+#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
#define E1000_FLUDONE_ATTEMPTS 20000
#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
#define E1000_I210_FIFO_SEL_RX 0x00
#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
+#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */
+/* Secure FLASH mode requires removing MSb */
+#define E1000_I210_FW_PTR_MASK 0x7FFF
+/* Firmware code revision field word offset */
+#define E1000_I210_FW_VER_OFFSET 328
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
#define E1000_FLUDONE_ATTEMPTS 20000
#define NVM_INIT_CTRL_4 0x0013
#define NVM_LED_1_CFG 0x001C
#define NVM_LED_0_2_CFG 0x001F
-
-/* NVM version defines */
#define NVM_ETRACK_WORD 0x0042
+#define NVM_ETRACK_HIWORD 0x0043
#define NVM_COMB_VER_OFF 0x0083
#define NVM_COMB_VER_PTR 0x003d
-#define NVM_MAJOR_MASK 0xF000
-#define NVM_MINOR_MASK 0x0FF0
-#define NVM_BUILD_MASK 0x000F
-#define NVM_COMB_VER_MASK 0x00FF
-#define NVM_MAJOR_SHIFT 12
-#define NVM_MINOR_SHIFT 4
-#define NVM_COMB_VER_SHFT 8
-#define NVM_VER_INVALID 0xFFFF
-#define NVM_ETRACK_SHIFT 16
+
+/* NVM version defines */
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_IMAGE_ID_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
+#define NVM_ETRACK_VALID 0x8000
+#define NVM_NEW_DEC_MASK 0x0F00
+#define NVM_HEX_CONV 16
+#define NVM_HEX_TENS 10
+
#define NVM_ETS_CFG 0x003E
#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
#define NVM_ETS_LTHRES_DELTA_SHIFT 6
#define E1000_DEV_ID_I210_FIBER 0x1536
#define E1000_DEV_ID_I210_SERDES 0x1537
#define E1000_DEV_ID_I210_SGMII 0x1538
+#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
+#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
#define E1000_DEV_ID_I211_COPPER 0x1539
#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
#define E1000_DEV_ID_I354_SGMII 0x1F41
e1000_nvm_none,
e1000_nvm_eeprom_spi,
e1000_nvm_flash_hw,
+ e1000_nvm_invm,
e1000_nvm_flash_sw
};
}
/**
- * igb_read_nvm_i211 - Read NVM wrapper function for I211
+ * igb_read_invm_word_i210 - Reads OTP
+ * @hw: pointer to the HW structure
+ * @address: the word address (aka eeprom offset) to read
+ * @data: pointer to the data read
+ *
+ * Reads 16-bit words from the OTP. Return error when the word is not
+ * stored in OTP.
+ **/
+static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
+{
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u32 invm_dword;
+ u16 i;
+ u8 record_type, word_address;
+
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = rd32(E1000_INVM_DATA_REG(i));
+ /* Get record type */
+ record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
+ if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
+ break;
+ if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
+ i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
+ i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
+ word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
+ if (word_address == address) {
+ *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
+ hw_dbg("Read INVM Word 0x%02x = %x",
+ address, *data);
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ }
+ if (status != E1000_SUCCESS)
+ hw_dbg("Requested word 0x%02x not found in OTP\n", address);
+ return status;
+}
+
+/**
+ * igb_read_invm_i210 - Read invm wrapper function for I210/I211
* @hw: pointer to the HW structure
* @words: number of words to read
* @data: pointer to the data read
*
* Wrapper function to return data formerly found in the NVM.
**/
-s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
+ u16 words __always_unused, u16 *data)
{
s32 ret_val = E1000_SUCCESS;
/* Only the MAC addr is required to be present in the iNVM */
switch (offset) {
case NVM_MAC_ADDR:
- ret_val = igb_read_invm_i211(hw, offset, &data[0]);
- ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]);
- ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
+ ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
+ &data[1]);
+ ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
+ &data[2]);
if (ret_val != E1000_SUCCESS)
hw_dbg("MAC Addr not found in iNVM\n");
break;
case NVM_INIT_CTRL_2:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_2_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_INIT_CTRL_4:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_4_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_1_CFG:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_1_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_0_2_CFG:
- igb_read_invm_i211(hw, offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_0_2_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_ID_LED_SETTINGS:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = ID_LED_RESERVED_FFFF;
ret_val = E1000_SUCCESS;
return ret_val;
}
-/**
- * igb_read_invm_i211 - Reads OTP
- * @hw: pointer to the HW structure
- * @address: the word address (aka eeprom offset) to read
- * @data: pointer to the data read
- *
- * Reads 16-bit words from the OTP. Return error when the word is not
- * stored in OTP.
- **/
-s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
-{
- s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
- u32 invm_dword;
- u16 i;
- u8 record_type, word_address;
-
- for (i = 0; i < E1000_INVM_SIZE; i++) {
- invm_dword = rd32(E1000_INVM_DATA_REG(i));
- /* Get record type */
- record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
- if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
- break;
- if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
- i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
- if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
- i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
- if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
- word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
- if (word_address == (u8)address) {
- *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
- hw_dbg("Read INVM Word 0x%02x = %x",
- address, *data);
- status = E1000_SUCCESS;
- break;
- }
- }
- }
- if (status != E1000_SUCCESS)
- hw_dbg("Requested word 0x%02x not found in OTP\n", address);
- return status;
-}
-
/**
* igb_read_invm_version - Reads iNVM version and image type
* @hw: pointer to the HW structure
return ret_val;
}
+/**
+ * igb_get_flash_presence_i210 - Check if flash device is detected.
+ * @hw: pointer to the HW structure
+ *
+ **/
+bool igb_get_flash_presence_i210(struct e1000_hw *hw)
+{
+ u32 eec = 0;
+ bool ret_val = false;
+
+ eec = rd32(E1000_EECD);
+ if (eec & E1000_EECD_FLASH_DETECTED_I210)
+ ret_val = true;
+
+ return ret_val;
+}
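+
+/* The flash-presence check lets callers pick between the flash-backed
+ * NVM path and the iNVM (OTP) fallback; see igb_init_nvm_params_i210()
+ * below for the canonical use.
+ */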
+
/**
* igb_update_flash_i210 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
{
return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
+
+/**
+ * igb_init_nvm_params_i210 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+
+ nvm->ops.acquire = igb_acquire_nvm_i210;
+ nvm->ops.release = igb_release_nvm_i210;
+ nvm->ops.valid_led_default = igb_valid_led_default_i210;
+
+ /* NVM Function Pointers */
+ if (igb_get_flash_presence_i210(hw)) {
+ hw->nvm.type = e1000_nvm_flash_hw;
+ nvm->ops.read = igb_read_nvm_srrd_i210;
+ nvm->ops.write = igb_write_nvm_srwr_i210;
+ nvm->ops.validate = igb_validate_nvm_checksum_i210;
+ nvm->ops.update = igb_update_nvm_checksum_i210;
+ } else {
+ hw->nvm.type = e1000_nvm_invm;
+ nvm->ops.read = igb_read_invm_i210;
+ nvm->ops.write = NULL;
+ nvm->ops.validate = NULL;
+ nvm->ops.update = NULL;
+ }
+ return ret_val;
+}
u16 words, u16 *data);
extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
-extern s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data);
extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
extern void igb_release_nvm_i210(struct e1000_hw *hw);
extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
extern s32 igb_read_invm_version(struct e1000_hw *hw,
struct e1000_fw_version *invm_ver);
extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
u16 *data);
extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
u16 data);
+extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
**/
void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
{
- u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
- u16 fw_version;
+ u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
+ u8 q, hval, rem, result;
+ u16 comb_verh, comb_verl, comb_offset;
memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+ /* Basic EEPROM version numbers and bits used vary by part and by the
+ * tool used to create the NVM image; check which data format we have.
+ */
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
switch (hw->mac.type) {
case e1000_i211:
igb_read_invm_version(hw, fw_vers);
case e1000_82575:
case e1000_82576:
case e1000_82580:
- case e1000_i354:
- case e1000_i350:
- case e1000_i210:
+ /* Use this format, unless EETRACK ID exists,
+ * then use alternate format
+ */
+ if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
+ goto etrack_id;
+ }
break;
- default:
- return;
- }
- /* basic eeprom version numbers */
- hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
- fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
-
- /* etrack id */
- hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
- hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
- fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
-
- switch (hw->mac.type) {
case e1000_i210:
- case e1000_i354:
+ if (!(igb_get_flash_presence_i210(hw))) {
+ igb_read_invm_version(hw, fw_vers);
+ return;
+ }
+ /* fall through */
case e1000_i350:
/* find combo image version */
hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
- if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {
+ if ((comb_offset != 0x0) &&
+ (comb_offset != NVM_VER_INVALID)) {
hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ 1), 1, &comb_verh);
fw_vers->or_major =
comb_verl >> NVM_COMB_VER_SHFT;
fw_vers->or_build =
- ((comb_verl << NVM_COMB_VER_SHFT)
- | (comb_verh >> NVM_COMB_VER_SHFT));
+ (comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT);
fw_vers->or_patch =
comb_verh & NVM_COMB_VER_MASK;
}
}
break;
default:
- break;
+ return;
+ }
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+
+ /* check for old-style version format in newer images */
+ if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
+ eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
+ } else {
+ eeprom_verl = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ }
+ /* Convert the minor value from its hex-digit encoding to decimal
+ * before assigning it to the output struct. The value to be converted
+ * will not be higher than 99, per tool output.
+ */
+ q = eeprom_verl / NVM_HEX_CONV;
+ hval = q * NVM_HEX_TENS;
+ rem = eeprom_verl % NVM_HEX_CONV;
+ result = hval + rem;
+ fw_vers->eep_minor = result;
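+ /* e.g. a raw value of 0x24 (36 decimal) gives q = 2, hval = 20,
+ * rem = 4, so eep_minor = 24: the hex digits are read as decimal.
+ */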
+
+etrack_id:
+ if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
+ | eeprom_verl;
}
return;
}
u32 etrack_id;
u16 eep_major;
u16 eep_minor;
+ u16 eep_build;
u8 invm_major;
u8 invm_minor;
};
#endif
+#define IGB_RETA_SIZE 128
+
/* board specific private data structure */
struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct i2c_algo_bit_data i2c_algo;
struct i2c_adapter i2c_adap;
struct i2c_client *i2c_client;
+ u32 rss_indir_tbl_init;
+ u8 rss_indir_tbl[IGB_RETA_SIZE];
};
#define IGB_FLAG_HAS_MSI (1 << 0)
extern void igb_down(struct igb_adapter *);
extern void igb_reinit_locked(struct igb_adapter *);
extern void igb_reset(struct igb_adapter *);
+extern void igb_write_rss_indir_tbl(struct igb_adapter *);
extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
extern int igb_setup_tx_resources(struct igb_ring *);
extern int igb_setup_rx_resources(struct igb_ring *);
static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
+ struct e1000_hw *hw = &adapter->hw;
+
*data = 0;
- /* Validate eeprom on all parts but i211 */
- if (adapter->hw.mac.type != e1000_i211) {
+ /* Validate eeprom on all parts but flashless */
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ if (igb_get_flash_presence_i210(hw)) {
+ if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
+ *data = 2;
+ }
+ break;
+ default:
if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
*data = 2;
+ break;
}
return *data;
igb_set_eee_i350(hw);
/* reset link */
- if (!netif_running(netdev))
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ else
igb_reset(adapter);
}
pm_runtime_put(&adapter->pdev->dev);
}
+static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return IGB_RETA_SIZE;
+}
+
+static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ indir[i] = adapter->rss_indir_tbl[i];
+
+ return 0;
+}
+
+void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg = E1000_RETA(0);
+ u32 shift = 0;
+ int i = 0;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ shift = 6;
+ break;
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ shift = 3;
+ break;
+ default:
+ break;
+ }
+
+ while (i < IGB_RETA_SIZE) {
+ u32 val = 0;
+ int j;
+
+ for (j = 3; j >= 0; j--) {
+ val <<= 8;
+ val |= adapter->rss_indir_tbl[i + j];
+ }
+
+ wr32(reg, val << shift);
+ reg += 4;
+ i += 4;
+ }
+}
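+
+/* Each 32-bit RETA register holds four one-byte table entries, packed
+ * little-endian by the loop above. The trailing shift assumes each entry
+ * is small enough not to cross a byte boundary, placing the queue index
+ * at the bit offset the older MACs expect (bit 6 on 82575, bit 3 on
+ * 82576 with SR-IOV enabled).
+ */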
+
+static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+ u32 num_queues;
+
+ num_queues = adapter->rss_queues;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ num_queues = 2;
+ break;
+ default:
+ break;
+ }
+
+ /* Verify user input. */
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ if (indir[i] >= num_queues)
+ return -EINVAL;
+
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ adapter->rss_indir_tbl[i] = indir[i];
+
+ igb_write_rss_indir_tbl(adapter);
+
+ return 0;
+}
+
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
.set_eee = igb_set_eee,
.get_module_info = igb_get_module_info,
.get_module_eeprom = igb_get_module_eeprom,
+ .get_rxfh_indir_size = igb_get_rxfh_indir_size,
+ .get_rxfh_indir = igb_get_rxfh_indir,
+ .set_rxfh_indir = igb_set_rxfh_indir,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
- /* ixgbe_get_stats64() might access the rings on this vector,
+ /* igb_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
kfree_rcu(q_vector, rcu);
igb_get_fw_version(hw, &fw);
switch (hw->mac.type) {
+ case e1000_i210:
case e1000_i211:
- snprintf(adapter->fw_version, sizeof(adapter->fw_version),
- "%2d.%2d-%d",
- fw.invm_major, fw.invm_minor, fw.invm_img_type);
- break;
-
+ if (!(igb_get_flash_presence_i210(hw))) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
+ break;
+ }
+ /* fall through */
default:
/* if option is rom valid, display its version too */
if (fw.or_valid) {
fw.eep_major, fw.eep_minor, fw.etrack_id,
fw.or_major, fw.or_build, fw.or_patch);
/* no option rom */
- } else {
+ } else if (fw.etrack_id != 0x0000) {
snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d, 0x%08x",
- fw.eep_major, fw.eep_minor, fw.etrack_id);
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
+ } else {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.eep_build);
}
break;
}
*/
hw->mac.ops.reset_hw(hw);
- /* make sure the NVM is good , i211 parts have special NVM that
- * doesn't contain a checksum
+ /* make sure the NVM is good; i210/i211 parts can have special NVM
+ * that doesn't contain a checksum
*/
- if (hw->mac.type != e1000_i211) {
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ if (igb_get_flash_presence_i210(hw)) {
+ if (hw->nvm.ops.validate(hw) < 0) {
+ dev_err(&pdev->dev,
+ "The NVM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+ }
+ break;
+ default:
if (hw->nvm.ops.validate(hw) < 0) {
dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
+ break;
}
/* copy the MAC address out of the NVM */
int err = 0;
int i;
+ if (!adapter->msix_entries) {
+ err = -EPERM;
+ goto out;
+ }
+
if (!num_vfs)
goto out;
else if (old_vfs && old_vfs == num_vfs)
{
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
- u32 j, num_rx_queues, shift = 0;
+ u32 j, num_rx_queues;
static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
0xA32DCB77, 0x0CF23080, 0x3BB7426A,
num_rx_queues = adapter->rss_queues;
switch (hw->mac.type) {
- case e1000_82575:
- shift = 6;
- break;
case e1000_82576:
/* 82576 supports 2 RSS queues for SR-IOV */
- if (adapter->vfs_allocated_count) {
- shift = 3;
+ if (adapter->vfs_allocated_count)
num_rx_queues = 2;
- }
break;
default:
break;
}
- /* Populate the indirection table 4 entries at a time. To do this
- * we are generating the results for n and n+2 and then interleaving
- * those with the results with n+1 and n+3.
- */
- for (j = 0; j < 32; j++) {
- /* first pass generates n and n+2 */
- u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
- u32 reta = (base & 0x07800780) >> (7 - shift);
-
- /* second pass generates n+1 and n+3 */
- base += 0x00010001 * num_rx_queues;
- reta |= (base & 0x07800780) << (1 + shift);
-
- wr32(E1000_RETA(j), reta);
+ if (adapter->rss_indir_tbl_init != num_rx_queues) {
+ for (j = 0; j < IGB_RETA_SIZE; j++)
+ adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+ adapter->rss_indir_tbl_init = num_rx_queues;
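+ /* e.g. with num_rx_queues = 4 this spreads the 128 entries
+ * evenly: 0-31 -> queue 0, 32-63 -> queue 1, and so on
+ */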
}
+ igb_write_rss_indir_tbl(adapter);
/* Disable raw packet checksumming so that RSS hash is placed in
* descriptor on writeback. No need to enable TCP/UDP/IP checksum
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = false;
- s32 ret_val = 0;
/* get_link_status is set on LSC (link status) interrupt or
* rx sequence error interrupt. get_link_status will stay
*/
switch (hw->phy.media_type) {
case e1000_media_type_copper:
- if (hw->mac.get_link_status) {
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
- } else {
- link_active = true;
- }
- break;
+ if (!hw->mac.get_link_status)
+ return true;
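+ /* fall through */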
case e1000_media_type_internal_serdes:
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = hw->mac.serdes_has_link;
+ hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
break;
default:
case e1000_media_type_unknown:
return -EINVAL;
}
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
+
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
msleep(1);
bytes = 0;
packets = 0;
+
+ rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
u32 rqdpc = rd32(E1000_RQDPC(i));
struct igb_ring *ring = adapter->rx_ring[i];
}
net_stats->tx_bytes = bytes;
net_stats->tx_packets = packets;
+ rcu_read_unlock();
/* read stats registers */
adapter->stats.crcerrs += rd32(E1000_CRCERRS);
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
+ u32 lo, hi;
u64 val;
- u32 lo, hi, jk;
/* The timestamp latches on lowest register read. For the 82580
* the lowest register is SYSTIMR instead of SYSTIML. However we only
* need to provide nanosecond resolution, so we just ignore it.
*/
- jk = rd32(E1000_SYSTIMR);
+ rd32(E1000_SYSTIMR);
lo = rd32(E1000_SYSTIML);
hi = rd32(E1000_SYSTIMH);
static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
{
struct e1000_hw *hw = &adapter->hw;
- u32 sec, nsec, jk;
+ u32 sec, nsec;
/* The timestamp latches on lowest register read. For I210/I211, the
* lowest register is SYSTIMR. Since we only need to provide nanosecond
* resolution, we can ignore it.
*/
- jk = rd32(E1000_SYSTIMR);
+ rd32(E1000_SYSTIMR);
nsec = rd32(E1000_SYSTIML);
sec = rd32(E1000_SYSTIMH);
struct ieee_pfc *pfc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_port_profile *prof = priv->prof;
struct mlx4_en_dev *mdev = priv->mdev;
int err;
pfc->mbc,
pfc->delay);
- priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en;
- priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en;
+ prof->rx_pause = !pfc->pfc_en;
+ prof->tx_pause = !pfc->pfc_en;
+ prof->rx_ppp = pfc->pfc_en;
+ prof->tx_ppp = pfc->pfc_en;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp);
+ prof->tx_pause,
+ prof->tx_ppp,
+ prof->rx_pause,
+ prof->rx_ppp);
if (err)
en_err(priv, "Failed setting pause params\n");
*/
rmb();
+ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+ MLX4_CQE_OPCODE_ERROR)) {
+ struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
+
+ en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
+ cqe_err->vendor_err_syndrome,
+ cqe_err->syndrome);
+ }
+
/* Skip over last polled CQE */
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct device *ddev = priv->ddev;
struct mlx4_en_tx_ring *ring;
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_wqe_data_seg *data;
- struct skb_frag_struct *frag;
struct mlx4_en_tx_info *tx_info;
- struct ethhdr *ethh;
int tx_ind = 0;
int nr_txbb;
int desc_size;
int real_size;
- dma_addr_t dma;
u32 index, bf_index;
__be32 op_own;
u16 vlan_tag = 0;
tx_info->skb = skb;
tx_info->nr_txbb = nr_txbb;
+ if (lso_header_size)
+ data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4,
+ DS_SIZE));
+ else
+ data = &tx_desc->data;
+
+ /* valid only for non-inline segments */
+ tx_info->data_offset = (void *)data - (void *)tx_desc;
+
+ tx_info->linear = (lso_header_size < skb_headlen(skb) &&
+ !is_inline(skb, NULL)) ? 1 : 0;
+
+ data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
+
+ if (is_inline(skb, &fragptr)) {
+ tx_info->inl = 1;
+ } else {
+ /* Map fragments */
+ for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+ struct skb_frag_struct *frag;
+ dma_addr_t dma;
+
+ frag = &skb_shinfo(skb)->frags[i];
+ dma = skb_frag_dma_map(ddev, frag,
+ 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma))
+ goto tx_drop_unmap;
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(skb_frag_size(frag));
+ --data;
+ }
+
+ /* Map linear part */
+ if (tx_info->linear) {
+ u32 byte_count = skb_headlen(skb) - lso_header_size;
+ dma_addr_t dma;
+
+ dma = dma_map_single(ddev, skb->data +
+ lso_header_size, byte_count,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(ddev, dma))
+ goto tx_drop_unmap;
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(byte_count);
+ }
+ tx_info->inl = 0;
+ }
+
/*
* For timestamping add flag to skb_shinfo and
* set flag for further reference
}
if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
+ struct ethhdr *ethh;
+
/* Copy dst mac address to wqe. This allows loopback in eSwitch,
* so that VFs and PF can communicate with each other
*/
/* Copy headers;
* note that we already verified that it is linear */
memcpy(tx_desc->lso.header, skb->data, lso_header_size);
- data = ((void *) &tx_desc->lso +
- ALIGN(lso_header_size + 4, DS_SIZE));
priv->port_stats.tso_packets++;
i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
- data = &tx_desc->data;
tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
ring->packets++;
netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
-
- /* valid only for none inline segments */
- tx_info->data_offset = (void *) data - (void *) tx_desc;
-
- tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
- data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
-
- if (!is_inline(skb, &fragptr)) {
- /* Map fragments */
- for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
- frag = &skb_shinfo(skb)->frags[i];
- dma = skb_frag_dma_map(priv->ddev, frag,
- 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
- data->addr = cpu_to_be64(dma);
- data->lkey = cpu_to_be32(mdev->mr.key);
- wmb();
- data->byte_count = cpu_to_be32(skb_frag_size(frag));
- --data;
- }
-
- /* Map linear part */
- if (tx_info->linear) {
- dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
- skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
- data->addr = cpu_to_be64(dma);
- data->lkey = cpu_to_be32(mdev->mr.key);
- wmb();
- data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
- }
- tx_info->inl = 0;
- } else {
+ if (tx_info->inl) {
build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
tx_info->inl = 1;
}
return NETDEV_TX_OK;
+tx_drop_unmap:
+ en_err(priv, "DMA mapping error\n");
+
+ for (i++; i < skb_shinfo(skb)->nr_frags; i++) {
+ data++;
+ dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr),
+ be32_to_cpu(data->byte_count),
+ PCI_DMA_TODEVICE);
+ }
+
tx_drop:
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
unsigned int len;
unsigned int tx_head = priv->tx_head;
u32 txdes1;
+ int ret = NETDEV_TX_BUSY;
desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
net_dbg_ratelimited("no TX space for packet\n");
priv->stats.tx_dropped++;
- return NETDEV_TX_BUSY;
+ goto out_unlock;
}
len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
len, DMA_TO_DEVICE);
if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
netdev_err(ndev, "DMA mapping error\n");
- return NETDEV_TX_BUSY;
+ goto out_unlock;
}
priv->tx_len[tx_head] = len;
priv->tx_head = TX_NEXT(tx_head);
ndev->trans_start = jiffies;
-
+ ret = NETDEV_TX_OK;
+out_unlock:
spin_unlock_irq(&priv->txlock);
- return NETDEV_TX_OK;
+ return ret;
}
static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
unregister_netdev(ndev);
free_irq(ndev->irq, ndev);
moxart_mac_free_memory(ndev);
- platform_set_drvdata(pdev, NULL);
free_netdev(ndev);
return 0;
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
+#include <net/busy_poll.h>
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
int cpu;
__be32 __iomem *dca_tag;
#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int state;
+#define SLICE_STATE_IDLE 0
+#define SLICE_STATE_NAPI 1 /* NAPI owns this slice */
+#define SLICE_STATE_POLL 2 /* poll owns this slice */
+#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL)
+#define SLICE_STATE_NAPI_YIELD 4 /* NAPI yielded this slice */
+#define SLICE_STATE_POLL_YIELD 8 /* poll yielded this slice */
+#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD)
+ spinlock_t lock;
+ unsigned long lock_napi_yield;
+ unsigned long lock_poll_yield;
+ unsigned long busy_poll_miss;
+ unsigned long busy_poll_cnt;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
char irq_desc[32];
};
return status;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
+{
+ spin_lock_init(&ss->lock);
+ ss->state = SLICE_STATE_IDLE;
+}
+
+static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
+{
+ bool rc = true;
+ spin_lock(&ss->lock);
+ if ((ss->state & SLICE_LOCKED)) {
+ WARN_ON((ss->state & SLICE_STATE_NAPI));
+ ss->state |= SLICE_STATE_NAPI_YIELD;
+ rc = false;
+ ss->lock_napi_yield++;
+ } else
+ ss->state = SLICE_STATE_NAPI;
+ spin_unlock(&ss->lock);
+ return rc;
+}
+
+static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
+{
+ spin_lock(&ss->lock);
+ WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD)));
+ ss->state = SLICE_STATE_IDLE;
+ spin_unlock(&ss->lock);
+}
+
+static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
+{
+ bool rc = true;
+ spin_lock_bh(&ss->lock);
+ if ((ss->state & SLICE_LOCKED)) {
+ ss->state |= SLICE_STATE_POLL_YIELD;
+ rc = false;
+ ss->lock_poll_yield++;
+ } else
+ ss->state |= SLICE_STATE_POLL;
+ spin_unlock_bh(&ss->lock);
+ return rc;
+}
+
+static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
+{
+ spin_lock_bh(&ss->lock);
+ WARN_ON((ss->state & SLICE_STATE_NAPI));
+ ss->state = SLICE_STATE_IDLE;
+ spin_unlock_bh(&ss->lock);
+}
+
+static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
+{
+ WARN_ON(!(ss->state & SLICE_LOCKED));
+ return (ss->state & SLICE_USER_PEND);
+}
+#else /* CONFIG_NET_RX_BUSY_POLL */
+static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
+{
+ return true; /* nothing can contend when busy polling is compiled out */
+}
+
+static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
+{
+ return false;
+}
+
+static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
+{
+ return false;
+}
+#endif
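+ /* Editorial note on the state machine above: a slice moves
+ * IDLE -> NAPI via myri10ge_ss_lock_napi(), IDLE -> POLL via
+ * myri10ge_ss_lock_poll(), and back to IDLE on the matching unlock.
+ * A contended trylock only records the corresponding *_YIELD bit and
+ * bumps the yield counter; the loser backs off and retries rather
+ * than spinning on the lock.
+ */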
+
static int myri10ge_reset(struct myri10ge_priv *mgp)
{
struct myri10ge_cmd cmd;
}
}
+#define MYRI10GE_HLEN 64 /* Bytes to copy from page to skb linear memory */
+
static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
struct pci_dev *pdev = mgp->pdev;
struct net_device *dev = mgp->dev;
u8 *va;
+ bool polling;
if (len <= mgp->small_bytes) {
rx = &ss->rx_small;
va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
prefetch(va);
- skb = napi_get_frags(&ss->napi);
+ /* When busy polling in user context, allocate the skb and copy the
+ * headers into its linear memory ourselves. When not busy polling,
+ * use the NAPI GRO API.
+ */
+ polling = myri10ge_ss_busy_polling(ss);
+ if (polling)
+ skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
+ else
+ skb = napi_get_frags(&ss->napi);
if (unlikely(skb == NULL)) {
ss->stats.rx_dropped++;
for (i = 0, remainder = len; remainder > 0; i++) {
}
myri10ge_vlan_rx(mgp->dev, va, skb);
skb_record_rx_queue(skb, ss - &mgp->ss[0]);
+ skb_mark_napi_id(skb, &ss->napi);
+
+ if (polling) {
+ int hlen;
+
+ /* myri10ge_vlan_rx might have moved the header, so compute
+ * length and address again.
+ */
+ hlen = MYRI10GE_HLEN > skb->len ? skb->len : MYRI10GE_HLEN;
+ va = page_address(skb_frag_page(&rx_frags[0])) +
+ rx_frags[0].page_offset;
+ /* Copy header into the skb linear memory */
+ skb_copy_to_linear_data(skb, va, hlen);
+ rx_frags[0].page_offset += hlen;
+ rx_frags[0].size -= hlen;
+ skb->data_len -= hlen;
+ skb->tail += hlen;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ } else
+ napi_gro_frags(&ss->napi);
- napi_gro_frags(&ss->napi);
return 1;
}
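+ /* Editorial note: the busy-poll branch above is a hand-rolled header
+ * pull: hlen bytes (at most MYRI10GE_HLEN) move from frag 0 into the
+ * skb linear area, and the frag offset/size plus skb->data_len and
+ * skb->tail are adjusted to match before netif_receive_skb(). The
+ * non-polling path gets the same effect inside napi_gro_frags().
+ */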
if (ss->mgp->dca_enabled)
myri10ge_update_dca(ss);
#endif
+ /* Try later if the busy_poll handler is running. */
+ if (!myri10ge_ss_lock_napi(ss))
+ return budget;
/* process as many rx events as NAPI will allow */
work_done = myri10ge_clean_rx_done(ss, budget);
+ myri10ge_ss_unlock_napi(ss);
if (work_done < budget) {
napi_complete(napi);
put_be32(htonl(3), ss->irq_claim);
return work_done;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int myri10ge_busy_poll(struct napi_struct *napi)
+{
+ struct myri10ge_slice_state *ss =
+ container_of(napi, struct myri10ge_slice_state, napi);
+ struct myri10ge_priv *mgp = ss->mgp;
+ int work_done;
+
+ /* Poll only when the link is up */
+ if (mgp->link_state != MXGEFW_LINK_UP)
+ return LL_FLUSH_FAILED;
+
+ if (!myri10ge_ss_lock_poll(ss))
+ return LL_FLUSH_BUSY;
+
+ /* Process a small number of packets */
+ work_done = myri10ge_clean_rx_done(ss, 4);
+ if (work_done)
+ ss->busy_poll_cnt += work_done;
+ else
+ ss->busy_poll_miss++;
+
+ myri10ge_ss_unlock_poll(ss);
+
+ return work_done;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
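+ /* Editorial sketch (assumption, not part of this patch): the socket
+ * busy-loop helper in <net/busy_poll.h> reaches this handler through
+ * ndo_busy_poll while a socket spins, roughly:
+ *
+ * rc = napi->dev->netdev_ops->ndo_busy_poll(napi);
+ * if (rc == LL_FLUSH_FAILED)
+ * break; /* fall back to normal NAPI delivery */
+ */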
+
static irqreturn_t myri10ge_intr(int irq, void *arg)
{
struct myri10ge_slice_state *ss = arg;
"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
"rx_small_cnt", "rx_big_cnt",
"wake_queue", "stop_queue", "tx_linearized",
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss",
+ "rx_busy_poll_cnt",
+#endif
};
#define MYRI10GE_NET_STATS_LEN 21
data[i++] = (unsigned int)ss->tx.wake_queue;
data[i++] = (unsigned int)ss->tx.stop_queue;
data[i++] = (unsigned int)ss->tx.linearized;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ data[i++] = ss->lock_napi_yield;
+ data[i++] = ss->lock_poll_yield;
+ data[i++] = ss->busy_poll_miss;
+ data[i++] = ss->busy_poll_cnt;
+#endif
}
}
goto abort_with_rings;
}
+ /* Initialize the slice spinlock and state used for polling */
+ myri10ge_ss_init_lock(ss);
+
/* must happen prior to any irq */
napi_enable(&(ss)->napi);
}
del_timer_sync(&mgp->watchdog_timer);
mgp->running = MYRI10GE_ETH_STOPPING;
+ local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
for (i = 0; i < mgp->num_slices; i++) {
napi_disable(&mgp->ss[i].napi);
+ /* Lock the slice to prevent the busy_poll handler from
+ * accessing it. Later when we bring the NIC up, myri10ge_open
+ * resets the slice including this lock.
+ */
+ while (!myri10ge_ss_lock_napi(&mgp->ss[i])) {
+ pr_info("Slice %d locked\n", i);
+ mdelay(1);
+ }
}
+ local_bh_enable();
netif_carrier_off(dev);
netif_tx_stop_all_queues(dev);
ss->fw_stats, ss->fw_stats_bus);
ss->fw_stats = NULL;
}
+ napi_hash_del(&ss->napi);
netif_napi_del(&ss->napi);
}
+ /* Wait until the napi structs are no longer used, then free ss. */
+ synchronize_rcu();
kfree(mgp->ss);
mgp->ss = NULL;
}
ss->dev = mgp->dev;
netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
myri10ge_napi_weight);
+ napi_hash_add(&ss->napi);
}
return 0;
abort:
.ndo_change_mtu = myri10ge_change_mtu,
.ndo_set_rx_mode = myri10ge_set_multicast_list,
.ndo_set_mac_address = myri10ge_set_mac_address,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = myri10ge_busy_poll,
+#endif
};
static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* filled by get_settings() on a down link, speed is -1: */
if (speed == UINT_MAX) {
speed = SPEED_1000;
+ ethtool_cmd_speed_set(ecmd, speed);
ecmd->duplex = DUPLEX_FULL;
}
ret = mii_ethtool_sset(&adapter->mii, ecmd);
#include "qlcnic_83xx_hw.h"
#define _QLCNIC_LINUX_MAJOR 5
-#define _QLCNIC_LINUX_MINOR 2
-#define _QLCNIC_LINUX_SUBVERSION 46
-#define QLCNIC_LINUX_VERSIONID "5.2.46"
+#define _QLCNIC_LINUX_MINOR 3
+#define _QLCNIC_LINUX_SUBVERSION 48
+#define QLCNIC_LINUX_VERSIONID "5.3.48"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
+#define QLCNIC_MAX_TX_RINGS 8
+#define QLCNIC_MAX_SDS_RINGS 8
+
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
u32 mbox_reg[4];
struct qlcnic_mailbox *mailbox;
+ u8 extend_lb_time;
+ u8 phys_port_id[ETH_ALEN];
};
struct qlcnic_adapter_stats {
u32 num_desc;
void __iomem *crb_sts_consumer;
+ struct qlcnic_host_tx_ring *tx_ring;
struct status_desc *desc_head;
struct qlcnic_adapter *adapter;
struct napi_struct napi;
void __iomem *crb_intr_mask;
char name[IFNAMSIZ + 12];
u16 ctx_id;
+
+ u32 state;
u32 producer;
u32 sw_consumer;
u32 num_desc;
+
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 xmit_called;
+ u64 xmit_finished;
+
void __iomem *crb_cmd_producer;
struct cmd_desc_type0 *desc_head;
struct qlcnic_adapter *adapter;
u32 state;
u16 context_id;
u16 virt_port;
-
};
/* HW context creation */
#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
#define QLCNIC_CAP0_VALIDOFF (1 << 11)
#define QLCNIC_CAP0_LRO_MSS (1 << 21)
+#define QLCNIC_CAP0_TX_MULTI (1 << 22)
/*
* Context state
struct qlcnic_hostrq_rx_ctx {
__le64 host_rsp_dma_addr; /* Response dma'd here */
- __le32 capabilities[4]; /* Flag bit vector */
+ __le32 capabilities[4]; /* Flag bit vector */
__le32 host_int_crb_mode; /* Interrupt crb usage */
__le32 host_rds_crb_mode; /* RDS crb usage */
/* These ring offsets are relative to data[0] below */
#define QLCNIC_FW_CAPABILITY_BDG BIT_8
#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
+#define QLCNIC_FW_CAPABILITY_2_MULTI_TX BIT_4
#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
#define QLCNIC_FW_LRO_MSS_CAP 0x8000
#define QLCNIC_TX_INTR_SHARED 0x10000
#define QLCNIC_APP_CHANGED_FLAGS 0x20000
+#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
+
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
#define QLCNIC_BEACON_DISABLE 0xD
#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
+#define QLCNIC_DEF_NUM_TX_RINGS 4
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
#define __QLCNIC_DIAG_RES_ALLOC 6
#define __QLCNIC_LED_ENABLE 7
#define __QLCNIC_ELB_INPROGRESS 8
+#define __QLCNIC_MULTI_TX_UNIQUE 9
#define __QLCNIC_SRIOV_ENABLE 10
#define __QLCNIC_SRIOV_CAPABLE 11
#define __QLCNIC_MBX_POLL_ENABLE 12
void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
-void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32);
void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t);
+int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int);
int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
+int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, int);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
void qlcnic_free_tx_rings(struct qlcnic_adapter *);
int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
tx_ring->producer;
}
+static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int err, tx_q;
+
+ tx_q = adapter->max_drv_tx_rings;
+
+ netdev->num_tx_queues = tx_q;
+ netdev->real_num_tx_queues = tx_q;
+
+ err = netif_set_real_num_tx_queues(netdev, tx_q);
+ if (err)
+ dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
+ tx_q);
+ else
+ dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q);
+
+ return err;
+}
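+ /* Editorial note (assumption): writing netdev->num_tx_queues directly,
+ * as above, is only safe before register_netdev(); after registration
+ * only netif_set_real_num_tx_queues() under RTNL may change the count.
+ * The probe path calls this helper just before register_netdev().
+ */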
+
struct qlcnic_nic_template {
int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
int (*config_led) (struct qlcnic_adapter *, u32, u32);
int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
void (*get_ocm_win) (struct qlcnic_hardware_context *);
- int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
- int (*setup_intr) (struct qlcnic_adapter *, u8);
+ int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
+ int (*setup_intr) (struct qlcnic_adapter *, u8, int);
int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
int (*get_board_info) (struct qlcnic_adapter *);
void (*set_mac_filter_count) (struct qlcnic_adapter *);
void (*free_mac_list) (struct qlcnic_adapter *);
+ int (*read_phys_port_id) (struct qlcnic_adapter *);
};
extern struct qlcnic_nic_template qlcnic_vf_ops;
}
static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
- u8 *mac)
+ u8 *mac, u8 function)
{
- return adapter->ahw->hw_ops->get_mac_address(adapter, mac);
+ return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
}
-static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter,
+ u8 num_intr, int txq)
{
- return adapter->ahw->hw_ops->setup_intr(adapter, num_intr);
+ return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq);
}
static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
adapter->ahw->hw_ops->set_mac_filter_count(adapter);
}
+static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->read_phys_port_id)
+ adapter->ahw->hw_ops->read_phys_port_id(adapter);
+}
+
static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
u32 key)
{
adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
}
+static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+}
+
+static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
+{
+ test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ adapter->max_drv_tx_rings = 1;
+}
+
+/* When operating in multi-Tx mode, the driver needs to write 0x1
+ * to the src register, instead of 0x0, to disable receiving interrupts.
+ */
static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
- writel(0, sds_ring->crb_intr_mask);
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0x1, sds_ring->crb_intr_mask);
+ else
+ writel(0, sds_ring->crb_intr_mask);
}
+/* When operating in multi-Tx mode, the driver needs to write 0x0
+ * to the src register, instead of 0x1, to enable receiving interrupts.
+ */
static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
{
struct qlcnic_adapter *adapter = sds_ring->adapter;
- writel(0x1, sds_ring->crb_intr_mask);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0, sds_ring->crb_intr_mask);
+ else
+ writel(0x1, sds_ring->crb_intr_mask);
if (!QLCNIC_IS_MSI_FAMILY(adapter))
writel(0xfbff, adapter->tgt_mask_reg);
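+/* Editorial summary: with multi-Tx over MSI-X the src register polarity
+ * is inverted relative to the legacy/MSI/diag path, so disable writes
+ * 0x1 and enable writes 0x0; every other mode keeps the original 0/1
+ * convention of the two helpers above.
+ */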
__func__, ##_args); \
} while (0)
-#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
+#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
-#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
{
bool status;
status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
(device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
return status;
static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
+ bool status;
- return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+ status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+
+ return status;
}
#endif /* __QLCNIC_H_ */
}
}
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
{
int err, i, num_msix;
struct qlcnic_hardware_context *ahw = adapter->ahw;
static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
u32 data[]);
-static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
- struct qlcnic_cmd_args *cmd)
+void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
int i;
void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 event[QLC_83XX_MBX_AEN_CNT];
int i;
- struct qlcnic_hardware_context *ahw = adapter->ahw;
for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
event[i] = readl(QLCNIC_MBX_FW(ahw, i));
&adapter->idc_aen_work, 0);
break;
case QLCNIC_MBX_TIME_EXTEND_EVENT:
+ ahw->extend_lb_time = event[1] >> 8 & 0xf;
break;
case QLCNIC_MBX_BC_EVENT:
qlcnic_sriov_handle_bc_event(adapter, event[1]);
/* Make sure carrier is off and queue is stopped during loopback */
if (netif_running(netdev)) {
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
}
ret = qlcnic_do_lb_test(adapter, mode);
return ret;
}
+static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
+ u32 *max_wait_count)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int temp;
+
+ netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n",
+ ahw->extend_lb_time);
+ temp = ahw->extend_lb_time * 1000;
+ *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
+ ahw->extend_lb_time = 0;
+}
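+/* Worked example (editorial, assuming QLC_83XX_LB_MSLEEP_COUNT is the
+ * per-iteration sleep in milliseconds): a 2-second time-extend AEN gives
+ * temp = 2000, so max_wait_count grows by 2000 / QLC_83XX_LB_MSLEEP_COUNT
+ * additional polling iterations before the timeout fires.
+ */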
+
int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct net_device *netdev = adapter->netdev;
+ u32 config, max_wait_count;
int status = 0, loop = 0;
- u32 config;
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
status = qlcnic_83xx_get_port_config(adapter);
if (status)
return status;
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
return -EBUSY;
}
- if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
- netdev_err(netdev,
- "Did not receive IDC completion AEN\n");
+
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
qlcnic_83xx_clear_lb_mode(adapter, mode);
return -ETIMEDOUT;
int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 config = ahw->port_config, max_wait_count;
struct net_device *netdev = adapter->netdev;
int status = 0, loop = 0;
- u32 config = ahw->port_config;
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
if (mode == QLCNIC_ILB_MODE)
ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
return -EBUSY;
}
- if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
- netdev_err(netdev,
- "Did not receive IDC completion AEN\n");
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
return -ETIMEDOUT;
}
cmd->req.arg[1] = type;
}
-int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
{
int err, i;
struct qlcnic_cmd_args cmd;
u32 mac_low, mac_high;
+ function = 0;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
if (err)
return err;
/* Firmware image definitions */
#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin"
+#define QLC_84XX_FW_FILE_NAME "84xx_fw.bin"
#define QLC_83XX_BOOT_FROM_FLASH 0
#define QLC_83XX_BOOT_FROM_FILE 0x12345678
+#define QLC_FW_FILE_NAME_LEN 20
#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
#define QLC_83XX_MBX_POST_BC_OP 0x1
#define QLC_83XX_MAX_MC_COUNT 38
#define QLC_83XX_MAX_UC_COUNT 4096
+#define QLC_83XX_PVID_STRIP_CAPABILITY BIT_22
#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
/* 83xx functions */
int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int);
void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
-int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
struct qlcnic_cmd_args *);
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
}
+static inline void qlcnic_83xx_get_fw_file_name(struct qlcnic_adapter *adapter,
+ char *file_name)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ memset(file_name, 0, QLC_FW_FILE_NAME_LEN);
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ strncpy(file_name, QLC_83XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
+ strncpy(file_name, QLC_84XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ default:
+ dev_err(&pdev->dev, "%s: Invalid device id\n",
+ __func__);
+ }
+}
+
static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
{
+ char fw_file_name[QLC_FW_FILE_NAME_LEN];
int err = -EIO;
- if (request_firmware(&adapter->ahw->fw_info.fw,
- QLC_83XX_FW_FILE_NAME, &(adapter->pdev->dev))) {
+ qlcnic_83xx_get_fw_file_name(adapter, fw_file_name);
+ if (request_firmware(&adapter->ahw->fw_info.fw, fw_file_name,
+ &(adapter->pdev->dev))) {
dev_err(&adapter->pdev->dev,
"No file FW image, loading flash FW image.\n");
QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
if (err)
goto detach_mbx;
- err = qlcnic_setup_intr(adapter, 0);
+ err = qlcnic_setup_intr(adapter, 0, 0);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
goto disable_intr;
{QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
{QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
{QLCNIC_CMD_GET_LED_STATUS, 4, 2},
+ {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
};
static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
break;
}
dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
+ qlcnic_dump_mbx(adapter, cmd);
} else if (rsp == QLCNIC_CDRP_RSP_OK)
cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
- void *addr;
- struct qlcnic_hostrq_rx_ctx *prq;
- struct qlcnic_cardrsp_rx_ctx *prsp;
- struct qlcnic_hostrq_rds_ring *prq_rds;
- struct qlcnic_hostrq_sds_ring *prq_sds;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ struct net_device *netdev = adapter->netdev;
+ u32 temp_intr_crb_mode, temp_rds_crb_mode;
struct qlcnic_cardrsp_rds_ring *prsp_rds;
struct qlcnic_cardrsp_sds_ring *prsp_sds;
+ struct qlcnic_hostrq_rds_ring *prq_rds;
+ struct qlcnic_hostrq_sds_ring *prq_sds;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_cmd_args cmd;
-
- dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
- u64 phys_addr;
-
+ struct qlcnic_cardrsp_rx_ctx *prsp;
+ struct qlcnic_hostrq_rx_ctx *prq;
u8 i, nrds_rings, nsds_rings;
- u16 temp_u16;
+ struct qlcnic_cmd_args cmd;
size_t rq_size, rsp_size;
u32 cap, reg, val, reg2;
+ u64 phys_addr;
+ u16 temp_u16;
+ void *addr;
int err;
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
nrds_rings = adapter->max_rds_rings;
nsds_rings = adapter->max_sds_rings;
- rq_size =
- SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
- nsds_rings);
- rsp_size =
- SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
- nsds_rings);
+ rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+ nsds_rings);
+ rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+ nsds_rings);
addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
- &hostrq_phys_addr, GFP_KERNEL);
+ &hostrq_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
prq = addr;
| QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
- temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
- prq->valid_field_offset = cpu_to_le16(temp_u16);
- prq->txrx_sds_binding = nsds_rings - 1;
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ cap |= QLCNIC_CAP0_TX_MULTI;
+ } else {
+ temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+ prq->valid_field_offset = cpu_to_le16(temp_u16);
+ prq->txrx_sds_binding = nsds_rings - 1;
+ temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
+ temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
+ prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
+ }
prq->capabilities[0] = cpu_to_le32(cap);
- prq->host_int_crb_mode =
- cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
- prq->host_rds_crb_mode =
- cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
prq->num_rds_rings = cpu_to_le16(nrds_rings);
prq->num_sds_rings = cpu_to_le16(nsds_rings);
le32_to_cpu(prq->rds_ring_offset));
for (i = 0; i < nrds_rings; i++) {
-
rds_ring = &recv_ctx->rds_rings[i];
rds_ring->producer = 0;
-
prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
prq_rds[i].ring_kind = cpu_to_le32(i);
le32_to_cpu(prq->sds_ring_offset));
for (i = 0; i < nsds_rings; i++) {
-
sds_ring = &recv_ctx->sds_rings[i];
sds_ring->consumer = 0;
memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
-
prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
- prq_sds[i].msi_index = cpu_to_le16(i);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
+ else
+ prq_sds[i].msi_index = cpu_to_le16(i);
}
phys_addr = hostrq_phys_addr;
for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
rds_ring = &recv_ctx->rds_rings[i];
-
reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
- rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
+ rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
}
prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
sds_ring = &recv_ctx->sds_rings[i];
-
reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
- reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ reg2 = ahw->intr_tbl[i].src;
+ else
+ reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
- sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
- sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
+ sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
+ sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
}
recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
recv_ctx->context_id = le16_to_cpu(prsp->context_id);
recv_ctx->virt_port = prsp->virt_port;
+ netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
+ recv_ctx->context_id, recv_ctx->state);
qlcnic_free_mbx_args(&cmd);
+
out_free_rsp:
dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
cardrsp_phys_addr);
out_free_rq:
dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
+
return err;
}
struct qlcnic_host_tx_ring *tx_ring,
int ring)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
struct qlcnic_hostrq_tx_ctx *prq;
struct qlcnic_hostrq_cds_ring *prq_cds;
struct qlcnic_cardrsp_tx_ctx *prsp;
- void *rq_addr, *rsp_addr;
- size_t rq_size, rsp_size;
- u32 temp;
struct qlcnic_cmd_args cmd;
- int err;
- u64 phys_addr;
- dma_addr_t rq_phys_addr, rsp_phys_addr;
+ u32 temp, intr_mask, temp_int_crb_mode;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ int temp_nsds_rings, index, err;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u64 phys_addr;
+ u16 msix_id;
/* reset host resources */
tx_ring->producer = 0;
}
prq = rq_addr;
-
prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
- QLCNIC_CAP0_LSO);
+ QLCNIC_CAP0_LSO);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ temp |= QLCNIC_CAP0_TX_MULTI;
+
prq->capabilities[0] = cpu_to_le32(temp);
- prq->host_int_crb_mode =
- cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
- prq->msi_index = 0;
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ temp_nsds_rings = adapter->max_sds_rings;
+ index = temp_nsds_rings + ring;
+ msix_id = ahw->intr_tbl[index].id;
+ prq->msi_index = cpu_to_le16(msix_id);
+ } else {
+ temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
+ prq->msi_index = 0;
+ }
prq->interrupt_ctl = 0;
prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
err = qlcnic_issue_cmd(adapter, &cmd);
if (err == QLCNIC_RCODE_SUCCESS) {
+ tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ index = adapter->max_sds_rings + ring;
+ intr_mask = ahw->intr_tbl[index].src;
+ tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+
+ netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
+ tx_ring->ctx_id, tx_ring->state);
} else {
- dev_err(&adapter->pdev->dev,
- "Failed to create tx ctx in firmware%d\n", err);
+ netdev_err(netdev, "Failed to create tx ctx in firmware %d\n",
+ err);
err = -EIO;
}
-
qlcnic_free_mbx_args(&cmd);
out_free_rsp:
}
}
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
+ err = qlcnic_82xx_mq_intrpt(dev, 1);
+ if (err)
+ return err;
+ }
+
err = qlcnic_fw_cmd_create_rx_ctx(dev);
if (err)
goto err_out;
}
set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
+
return 0;
err_out:
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(dev, 0);
+
if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
qlcnic_83xx_config_intrpt(dev, 0);
}
+
return err;
}
qlcnic_fw_cmd_del_tx_ctx(adapter,
&adapter->tx_ring[ring]);
+ if (qlcnic_82xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(adapter, 0);
+
if (qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED)) {
if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
}
}
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_cmd_args cmd;
+ u32 type, val;
+ int i, err = 0;
+
+ for (i = 0; i < ahw->num_msix; i++) {
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_MQ_TX_CONFIG_INTR);
+ type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+ val = type | (ahw->intr_tbl[i].type << 4);
+ if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+ val |= (ahw->intr_tbl[i].id << 16);
+ cmd.req.arg[1] = val;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(netdev, "Failed to %s interrupts %d\n",
+ op_type == QLCNIC_INTRPT_ADD ? "Add" :
+ "Delete", err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+ }
+ val = cmd.rsp.arg[1];
+ if (LSB(val)) {
+ netdev_info(netdev,
+ "failed to configure interrupt for %d\n",
+ ahw->intr_tbl[i].id);
+ continue;
+ }
+ if (op_type) {
+ ahw->intr_tbl[i].id = MSW(val);
+ ahw->intr_tbl[i].enabled = 1;
+ ahw->intr_tbl[i].src = cmd.rsp.arg[2];
+ } else {
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].enabled = 0;
+ ahw->intr_tbl[i].src = 0;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ }
+
+ return err;
+}
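+
+/* Editorial sketch of the cmd.req.arg[1] encoding used above: bits 3:0
+ * carry the add/delete opcode, bits 7:4 the interrupt type, and bits
+ * 31:16 the MSI-X vector id, so adding MSI-X vector 3 would be
+ * QLCNIC_INTRPT_ADD | (QLCNIC_INTRPT_MSIX << 4) | (3 << 16).
+ */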
-int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
{
int err, i;
struct qlcnic_cmd_args cmd;
if (err)
return err;
- cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
+ cmd.req.arg[1] = function | BIT_8;
err = qlcnic_issue_cmd(adapter, &cmd);
if (err == QLCNIC_RCODE_SUCCESS) {
};
#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_tx_ring_stats_strings[][ETH_GSTRING_LEN] = {
+ "xmit_on",
+ "xmit_off",
+ "xmit_called",
+ "xmit_finished",
+};
+
static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
"ctx_rx_bytes",
"ctx_rx_pkts",
static void qlcnic_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
- int min;
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int min;
min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
channel->max_rx = rounddown_pow_of_two(min);
- channel->max_tx = adapter->ahw->max_tx_ques;
+ channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus());
channel->rx_count = adapter->max_sds_rings;
- channel->tx_count = adapter->ahw->max_tx_ques;
+ channel->tx_count = adapter->max_drv_tx_rings;
}
static int qlcnic_set_channels(struct net_device *dev,
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err;
+ int txq = 0;
- if (channel->other_count || channel->combined_count ||
- channel->tx_count != channel->max_tx)
+ if (channel->other_count || channel->combined_count)
return -EINVAL;
- err = qlcnic_validate_max_rss(adapter, channel->rx_count);
- if (err)
- return err;
+ if (channel->rx_count) {
+ err = qlcnic_validate_max_rss(adapter, channel->rx_count);
+ if (err)
+ return err;
+ }
+
+ if (channel->tx_count) {
+ err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
+ if (err)
+ return err;
+ txq = channel->tx_count;
+ }
- err = qlcnic_set_max_rss(adapter, channel->rx_count, 0);
- netdev_info(dev, "allocated 0x%x sds rings\n",
- adapter->max_sds_rings);
+ err = qlcnic_set_max_rss(adapter, channel->rx_count, txq);
+ netdev_info(dev, "allocated 0x%x sds rings and 0x%x tx rings\n",
+ adapter->max_sds_rings, adapter->max_drv_tx_rings);
return err;
}
clear_diag_irq:
adapter->max_sds_rings = max_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
return ret;
}
int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_drv_tx_rings = adapter->max_drv_tx_rings;
int max_sds_rings = adapter->max_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_hardware_context *ahw = adapter->ahw;
clear_it:
adapter->max_sds_rings = max_sds_rings;
+ adapter->max_drv_tx_rings = max_drv_tx_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
}
QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(qlcnic_tx_ring_stats_strings);
+ for (i = 0; i < adapter->max_drv_tx_rings; i++) {
+ for (index = 0; index < num_stats; index++) {
+ sprintf(data, "tx_ring_%d %s", i,
+ qlcnic_tx_ring_stats_strings[index]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
for (index = 0; index < QLCNIC_STATS_LEN; index++) {
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_gstrings_stats[index].stat_string,
ETH_GSTRING_LEN);
}
+
if (qlcnic_83xx_check(adapter)) {
num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
for (i = 0; i < num_stats; i++, index++)
struct ethtool_stats *stats, u64 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
- int index, ret, length, size;
+ int index, ret, length, size, ring;
char *p;
+ memset(data, 0, adapter->max_drv_tx_rings * 4 * sizeof(u64)); /* 4 Tx stats per ring */
+ for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ tx_ring = &adapter->tx_ring[ring];
+ *data++ = tx_ring->xmit_on;
+ *data++ = tx_ring->xmit_off;
+ *data++ = tx_ring->xmit_called;
+ *data++ = tx_ring->xmit_finished;
+ }
+ }
memset(data, 0, stats->n_stats * sizeof(u64));
length = QLCNIC_STATS_LEN;
for (index = 0; index < length; index++) {
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return -EIO;
- tx_ring = adapter->tx_ring;
+ tx_ring = &adapter->tx_ring[0];
__netif_tx_lock_bh(tx_ring->txq);
producer = tx_ring->producer;
return 0;
}
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ u8 mac[ETH_ALEN];
+ int ret;
+
+ ret = qlcnic_get_mac_address(adapter, mac,
+ adapter->ahw->physical_port);
+ if (ret)
+ return ret;
+
+ memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN);
+ adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID;
+
+ return 0;
+}
+
/*
* Send the interrupt coalescing parameter set by ethtool to the card.
*/
#define QLCNIC_CMD_CONFIG_VPORT 0x32
#define QLCNIC_CMD_GET_MAC_STATS 0x37
#define QLCNIC_CMD_82XX_SET_DRV_VER 0x38
+#define QLCNIC_CMD_MQ_TX_CONFIG_INTR 0x39
#define QLCNIC_CMD_GET_LED_STATUS 0x3C
#define QLCNIC_CMD_CONFIGURE_RSS 0x41
#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43
struct pci_device_id;
struct qlcnic_host_sds_ring;
struct qlcnic_host_tx_ring;
-struct qlcnic_host_tx_ring;
struct qlcnic_hardware_context;
struct qlcnic_adapter;
void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int);
irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *);
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *, u8);
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *tx_ring, int);
void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *);
int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
-int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*, u8);
int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
}
}
-void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct qlcnic_cmd_buffer *cmd_buf;
struct qlcnic_skb_frag *buffrag;
int i, j;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
sds_ring->irq = adapter->msix_entries[ring].vector;
sds_ring->adapter = adapter;
sds_ring->num_desc = adapter->num_rxd;
-
+ if (qlcnic_82xx_check(adapter)) {
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ sds_ring->tx_ring = &adapter->tx_ring[ring];
+ else
+ sds_ring->tx_ring = &adapter->tx_ring[0];
+ }
for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
INIT_LIST_HEAD(&sds_ring->free_list[i]);
}
struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
struct qlcnic_host_rds_ring *, u16, u16);
+inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(0x0, tx_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(1, tx_ring->crb_intr_mask);
+}
+
inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring)
{
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
u16 handle, u8 ring_id)
{
- unsigned short device = adapter->pdev->device;
-
- if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
- (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
+ if (qlcnic_83xx_check(adapter))
return handle | (ring_id << 15);
else
return handle;
}
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
- struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
+ struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
{
u8 l4proto, opcode = 0, hdr_len = 0;
u16 flags = 0, vlan_tci = 0;
int copied, offset, copy_len, size;
struct cmd_desc_type0 *hwdesc;
struct vlan_ethhdr *vh;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u16 protocol = ntohs(skb->protocol);
u32 producer = tx_ring->producer;
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_cmd_buffer *pbuf;
struct qlcnic_skb_frag *buffrag;
struct cmd_desc_type0 *hwdesc, *first_desc;
int i, k, frag_count, delta = 0;
u32 producer, num_txd;
- num_txd = tx_ring->num_desc;
-
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
return NETDEV_TX_BUSY;
}
goto drop_packet;
}
+ if (qlcnic_check_multi_tx(adapter))
+ tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
+ else
+ tx_ring = &adapter->tx_ring[0];
+ num_txd = tx_ring->num_desc;
+
frag_count = skb_shinfo(skb)->nr_frags + 1;
+
/* 14 frags supported for normal packet and
* 32 frags supported for TSO packet
*/
}
if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
- netif_stop_queue(netdev);
+ netif_tx_stop_queue(tx_ring->txq);
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
- netif_start_queue(netdev);
+ netif_tx_start_queue(tx_ring->txq);
} else {
adapter->stats.xmit_off++;
+ tx_ring->xmit_off++;
return NETDEV_TX_BUSY;
}
}
tx_ring->producer = get_next_index(producer, num_txd);
smp_mb();
- if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
+ if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
goto unwind_buff;
if (adapter->drv_mac_learn)
adapter->stats.txbytes += skb->len;
adapter->stats.xmitcalled++;
+ tx_ring->xmit_called++;
qlcnic_update_cmd_producer(tx_ring);
adapter->ahw->linkup = 0;
if (netif_running(netdev)) {
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
}
} else if (!adapter->ahw->linkup && linkup) {
netdev_info(netdev, "NIC Link is up\n");
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
- if (!spin_trylock(&adapter->tx_clean_lock))
- return 1;
-
sw_consumer = tx_ring->sw_consumer;
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
frag->dma = 0ULL;
}
adapter->stats.xmitfinished++;
+ tx_ring->xmit_finished++;
dev_kfree_skb_any(buffer->skb);
buffer->skb = NULL;
}
if (count && netif_running(netdev)) {
tx_ring->sw_consumer = sw_consumer;
smp_mb();
- if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+ if (netif_tx_queue_stopped(tx_ring->txq) &&
+ netif_carrier_ok(netdev)) {
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
- netif_wake_queue(netdev);
+ netif_tx_wake_queue(tx_ring->txq);
adapter->stats.xmit_on++;
+ tx_ring->xmit_on++;
}
}
adapter->tx_timeo_cnt = 0;
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
- spin_unlock(&adapter->tx_clean_lock);
return done;
}
int tx_complete, work_done;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
adapter = sds_ring->adapter;
- tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
+ tx_ring = sds_ring->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
budget);
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
- if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
qlcnic_enable_int(sds_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+
+ return work_done;
+}
+
+static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_adapter *adapter;
+ int work_done;
+
+ tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+ adapter = tx_ring->adapter;
+
+ work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ if (work_done) {
+ napi_complete(&tx_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_tx_intr(adapter, tx_ring);
}
return work_done;
int ring, max_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
return -ENOMEM;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- if (ring == adapter->max_sds_rings - 1)
- netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
- QLCNIC_NETDEV_WEIGHT / max_sds_rings);
- else
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->max_drv_tx_rings > 1)) {
netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
- QLCNIC_NETDEV_WEIGHT*2);
+ QLCNIC_NETDEV_WEIGHT * 2);
+ } else {
+ if (ring == (adapter->max_sds_rings - 1))
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_poll,
+ QLCNIC_NETDEV_WEIGHT /
+ max_sds_rings);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll,
+ QLCNIC_NETDEV_WEIGHT * 2);
+ }
}
if (qlcnic_alloc_tx_rings(adapter, netdev)) {
return -ENOMEM;
}
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
+ QLCNIC_NETDEV_WEIGHT);
+ }
+ }
+
return 0;
}
int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
}
qlcnic_free_sds_rings(adapter->recv_ctx);
+
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_del(&tx_ring->napi);
+ }
+ }
+
qlcnic_free_tx_rings(adapter);
}
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
napi_enable(&sds_ring->napi);
qlcnic_enable_int(sds_ring);
}
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test &&
+ (adapter->max_drv_tx_rings > 1)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ napi_enable(&tx_ring->napi);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
}
void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
napi_synchronize(&sds_ring->napi);
napi_disable(&sds_ring->napi);
}
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test &&
+ qlcnic_check_multi_tx(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_int(adapter, tx_ring);
+ napi_synchronize(&tx_ring->napi);
+ napi_disable(&tx_ring->napi);
+ }
+ }
}
#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
{0,}
};
};
static const struct qlcnic_board_info qlcnic_boards[] = {
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE844X,
+ 0x0,
+ 0x0,
+ "8400 series 10GbE Converged Network Adapter (TCP/IP Networking)" },
{ PCI_VENDOR_ID_QLOGIC,
PCI_DEVICE_ID_QLOGIC_QLE834X,
PCI_VENDOR_ID_QLOGIC,
};
#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
-#define QLC_MAX_SDS_RINGS 8
static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
- u8 mac_addr[ETH_ALEN];
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ u8 mac_addr[ETH_ALEN];
+ int ret;
- if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
- return -EIO;
+ ret = qlcnic_get_mac_address(adapter, mac_addr,
+ adapter->ahw->pci_func);
+ if (ret)
+ return ret;
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
cancel_delayed_work_sync(&adapter->fw_work);
}
+static int qlcnic_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(ahw->phys_port_id);
+ memcpy(ppid->id, ahw->phys_port_id, ppid->id_len);
+
+ return 0;
+}
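+/* Editorial sketch (assumption): this handler is reached through
+ * dev_get_phys_port_id() in net/core/dev.c, e.g.:
+ *
+ * struct netdev_phys_port_id ppid;
+ * if (!dev_get_phys_port_id(netdev, &ppid))
+ * print_hex_dump_bytes("port id: ", DUMP_PREFIX_NONE,
+ * ppid.id, ppid.id_len);
+ */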
+
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
.ndo_fdb_add = qlcnic_fdb_add,
.ndo_fdb_del = qlcnic_fdb_del,
.ndo_fdb_dump = qlcnic_fdb_dump,
+ .ndo_get_phys_port_id = qlcnic_get_phys_port_id,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
.get_board_info = qlcnic_82xx_get_board_info,
.set_mac_filter_count = qlcnic_82xx_set_mac_filter_count,
.free_mac_list = qlcnic_82xx_free_mac_list,
+ .read_phys_port_id = qlcnic_82xx_read_phys_port_id,
};
+static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int num_tx_q;
+
+ if (ahw->msix_supported &&
+ (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
+ num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS,
+ num_online_cpus());
+ if (num_tx_q > 1) {
+ test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE,
+ &adapter->state);
+ adapter->max_drv_tx_rings = num_tx_q;
+ }
+ } else {
+ adapter->max_drv_tx_rings = 1;
+ }
+}
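+/* Worked example (editorial): on an 8-CPU host with MSI-X support and
+ * the QLCNIC_FW_CAPABILITY_2_MULTI_TX bit set, num_tx_q = min(4, 8) = 4,
+ * so __QLCNIC_MULTI_TX_UNIQUE is set and max_drv_tx_rings becomes 4; a
+ * single-CPU host gets num_tx_q = 1 and is left in single-Tx mode.
+ */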
+
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
+ int max_tx_rings, max_sds_rings, tx_vector;
int err = -1, i;
- int max_tx_rings, tx_vector;
if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
max_tx_rings = 0;
adapter->max_sds_rings = num_msix -
max_tx_rings - 1;
} else {
- adapter->max_sds_rings = num_msix;
+ adapter->ahw->num_msix = num_msix;
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->max_drv_tx_rings > 1))
+ max_sds_rings = num_msix - max_tx_rings;
+ else
+ max_sds_rings = num_msix;
+
+ adapter->max_sds_rings = max_sds_rings;
}
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
num_msix += (max_tx_rings + 1);
} else {
num_msix = rounddown_pow_of_two(err);
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += max_tx_rings;
}
if (num_msix) {
adapter->msix_entries[0].vector = pdev->irq;
return err;
}
+
if (qlcnic_use_msi || qlcnic_use_msi_x)
return -EOPNOTSUPP;
return err;
}
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int num_msix, err = 0;
if (!num_intr)
num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
- if (adapter->ahw->msix_supported)
+ if (ahw->msix_supported) {
num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
num_intr));
- else
+ if (qlcnic_check_multi_tx(adapter)) {
+ if (txq)
+ adapter->max_drv_tx_rings = txq;
+ num_msix += adapter->max_drv_tx_rings;
+ }
+ } else {
num_msix = 1;
+ }
err = qlcnic_enable_msix(adapter, num_msix);
- if (err == -ENOMEM || !err)
+ if (err == -ENOMEM)
return err;
- err = qlcnic_enable_msi_legacy(adapter);
- if (!err)
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_disable_multi_tx(adapter);
+
+ err = qlcnic_enable_msi_legacy(adapter);
+ if (!err)
+ return err;
+ }
+
+ return 0;
+}
+
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, i;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ ahw->intr_tbl = vzalloc(ahw->num_msix *
+ sizeof(struct qlcnic_intrpt_config));
+ if (!ahw->intr_tbl)
+ return -ENOMEM;
+
+ for (i = 0; i < ahw->num_msix; i++) {
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].src = 0;
+ }
+
+ err = qlcnic_82xx_config_intrpt(adapter, 1);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure interrupts for %d vectors\n",
+ ahw->num_msix);
return err;
+ }
- return -EIO;
+ return 0;
}
void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
*bar = QLCNIC_82XX_BAR0_LENGTH;
break;
case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
*bar = QLCNIC_83XX_BAR0_LENGTH;
break;
default:
for (ring = 0; ring < num_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter) &&
(ring == (num_sds_rings - 1))) {
if (!(adapter->flags &
QLCNIC_MSIX_ENABLED))
return err;
}
}
- if (qlcnic_83xx_check(adapter) &&
- (adapter->flags & QLCNIC_MSIX_ENABLED) &&
- !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ if ((qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter)) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
handler = qlcnic_msix_tx_intr;
for (ring = 0; ring < adapter->max_drv_tx_rings;
ring++) {
free_irq(sds_ring->irq, sds_ring);
}
}
- if (qlcnic_83xx_check(adapter) &&
- !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ if ((qlcnic_83xx_check(adapter) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+ (qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter))) {
for (ring = 0; ring < adapter->max_drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
return 0;
+
if (qlcnic_set_eswitch_port_config(adapter))
return -EIO;
+
qlcnic_get_lro_mss_capability(adapter);
if (qlcnic_fw_create_ctx(adapter))
void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
+ int ring;
+
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
if (qlcnic_sriov_vf_check(adapter))
qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
- spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
qlcnic_reset_rx_buffers_list(adapter);
- qlcnic_release_tx_buffers(adapter);
- spin_unlock(&adapter->tx_clean_lock);
+
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
}
/* Usage: During suspend and firmware recovery module */
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
+ int max_tx_rings = adapter->max_drv_tx_rings;
int ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
adapter->ahw->diag_test = 0;
adapter->max_sds_rings = max_sds_rings;
+ adapter->max_drv_tx_rings = max_tx_rings;
if (qlcnic_attach(adapter))
goto out;
adapter->max_sds_rings = 1;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
+ adapter->max_drv_tx_rings = 1;
ret = qlcnic_attach(adapter);
if (ret) {
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->irq = adapter->msix_entries[0].vector;
+ err = qlcnic_set_real_num_queues(adapter, netdev);
+ if (err)
+ return err;
+
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "failed to register net device\n");
tx_ring->cmd_buf_arr = cmd_buf_arr;
}
- if (qlcnic_83xx_check(adapter)) {
+ if (qlcnic_83xx_check(adapter) ||
+ (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
tx_ring->adapter = adapter;
}
}
}
+
return 0;
}
ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
break;
case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
qlcnic_83xx_register_map(ahw);
break;
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
qlcnic_sriov_vf_register_map(ahw);
break;
default:
if (err)
goto err_out_free_hw_res;
- netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
+ netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter),
+ QLCNIC_MAX_TX_RINGS);
if (!netdev) {
err = -ENOMEM;
goto err_out_iounmap;
adapter->fdb_mac_learn = true;
else if (qlcnic_mac_learn == DRV_MAC_LEARN)
adapter->drv_mac_learn = true;
- adapter->max_drv_tx_rings = 1;
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
- spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
if (qlcnic_82xx_check(adapter)) {
goto err_out_free_hw;
}
+ qlcnic_get_multiq_capability(adapter);
+
+ if ((adapter->ahw->act_pci_func > 2) &&
+ qlcnic_check_multi_tx(adapter)) {
+ adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS;
+ dev_info(&adapter->pdev->dev,
+ "vNIC mode enabled, Set max TX rings = %d\n",
+ adapter->max_drv_tx_rings);
+ }
+
+ if (!qlcnic_check_multi_tx(adapter)) {
+ clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ adapter->max_drv_tx_rings = 1;
+ }
err = qlcnic_setup_idc_param(adapter);
if (err)
goto err_out_free_hw;
adapter->flags |= QLCNIC_NEED_FLR;
} else if (qlcnic_83xx_check(adapter)) {
+ adapter->max_drv_tx_rings = 1;
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
err = qlcnic_83xx_init(adapter, pci_using_dac);
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
+ qlcnic_read_phys_port_id(adapter);
+
if (adapter->portnum == 0) {
qlcnic_get_board_name(adapter, board_name);
"Device does not support MSI interrupts\n");
if (qlcnic_82xx_check(adapter)) {
- err = qlcnic_setup_intr(adapter, 0);
+ err = qlcnic_setup_intr(adapter, 0, 0);
if (err) {
dev_err(&pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
if (err)
goto err_out;
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
return 0;
static void qlcnic_tx_timeout(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return;
QLCNIC_FORCE_FW_DUMP_KEY);
} else {
netdev_info(netdev, "Tx timeout, reset adapter context.\n");
+ if (qlcnic_82xx_check(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ dev_info(&netdev->dev, "ring=%d\n", ring);
+ dev_info(&netdev->dev, "crb_intr_mask=%d\n",
+ readl(tx_ring->crb_intr_mask));
+ dev_info(&netdev->dev, "producer=%d\n",
+ readl(tx_ring->crb_cmd_producer));
+ dev_info(&netdev->dev, "sw_consumer = %d\n",
+ tx_ring->sw_consumer);
+ dev_info(&netdev->dev, "hw_consumer = %d\n",
+ le32_to_cpu(*(tx_ring->hw_consumer)));
+ dev_info(&netdev->dev, "xmit-on=%llu\n",
+ tx_ring->xmit_on);
+ dev_info(&netdev->dev, "xmit-off=%llu\n",
+ tx_ring->xmit_off);
+ }
+ }
adapter->ahw->reset_context = 1;
}
}
qlcnic_clr_drv_state(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- err = qlcnic_setup_intr(adapter, 0);
+ err = qlcnic_setup_intr(adapter, 0, 0);
if (err) {
kfree(adapter->msix_entries);
return err;
}
+int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, int txq)
+{
+ struct net_device *netdev = adapter->netdev;
+ u8 max_hw = QLCNIC_MAX_TX_RINGS;
+ u32 max_allowed;
+
+ if (!qlcnic_82xx_check(adapter)) {
+ netdev_err(netdev, "No Multi TX-Q support\n");
+ return -EINVAL;
+ }
+
+ if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+ netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
+ return -EINVAL;
+ }
+
+ if (!qlcnic_check_multi_tx(adapter)) {
+ netdev_err(netdev, "No Multi TX-Q support\n");
+ return -EINVAL;
+ }
+
+ if (txq > QLCNIC_MAX_TX_RINGS) {
+ netdev_err(netdev, "Invalid ring count\n");
+ return -EINVAL;
+ }
+
+ max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
+ num_online_cpus()));
+ if ((txq > max_allowed) || !is_power_of_2(txq)) {
+ if (!is_power_of_2(txq))
+ netdev_err(netdev,
+ "TX queue should be a power of 2\n");
+ if (txq > num_online_cpus())
+ netdev_err(netdev,
+ "Tx queue should not be higher than [%u], number of online CPUs in the system\n",
+ num_online_cpus());
+ netdev_err(netdev, "Unable to configure %u Tx rings\n", txq);
+ return -EINVAL;
+ }
+
+ return 0;
+}
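
The check above reduces to: the requested count must be a power of two and no larger than rounddown_pow_of_two(min(QLCNIC_MAX_TX_RINGS, num_online_cpus())). A standalone sketch of the same arithmetic, with local stand-ins for the kernel's rounddown_pow_of_two() and is_power_of_2() helpers from <linux/log2.h>, and an assumed ring limit of 8:

#include <stdio.h>

/* Local stand-ins for the <linux/log2.h> helpers used above. */
static unsigned int rounddown_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

static int is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned int max_hw = 8;	/* assumed QLCNIC_MAX_TX_RINGS */
	unsigned int cpus = 6;		/* pretend num_online_cpus() == 6 */
	unsigned int cap = max_hw < cpus ? max_hw : cpus;
	unsigned int max_allowed = rounddown_pow_of_two(cap);
	unsigned int txq;

	for (txq = 1; txq <= max_hw; txq++)
		printf("txq=%u -> %s\n", txq,
		       (txq <= max_allowed && is_power_of_2(txq)) ?
		       "ok" : "rejected");	/* with 6 CPUs: 1, 2, 4 pass */
	return 0;
}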
+
int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
- __u32 val)
+ __u32 val)
{
struct net_device *netdev = adapter->netdev;
u8 max_hw = adapter->ahw->max_rx_ques;
u32 max_allowed;
- if (val > QLC_MAX_SDS_RINGS) {
+ if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+ !qlcnic_use_msi) {
+ netdev_err(netdev, "No RSS support in INT-x mode\n");
+ return -EINVAL;
+ }
+
+ if (val > QLCNIC_MAX_SDS_RINGS) {
netdev_err(netdev, "RSS value should not be higher than %u\n",
- QLC_MAX_SDS_RINGS);
+ QLCNIC_MAX_SDS_RINGS);
return -EINVAL;
}
return 0;
}
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq)
{
int err;
struct net_device *netdev = adapter->netdev;
+ int num_msix;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
+ if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+ !qlcnic_use_msi) {
+ netdev_err(netdev, "No RSS support in INT-x mode\n");
+ return -EINVAL;
+ }
+
netif_device_detach(netdev);
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
qlcnic_detach(adapter);
+ if (qlcnic_82xx_check(adapter)) {
+ if (txq != 0)
+ adapter->max_drv_tx_rings = txq;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ (txq > adapter->max_drv_tx_rings))
+ num_msix = adapter->max_drv_tx_rings;
+ else
+ num_msix = data;
+ }
+
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_enable_mbx_poll(adapter);
}
+ netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings);
+
qlcnic_teardown_intr(adapter);
- err = qlcnic_setup_intr(adapter, data);
+
+ err = qlcnic_setup_intr(adapter, data, txq);
if (err) {
kfree(adapter->msix_entries);
netdev_err(netdev, "failed to setup interrupt\n");
goto done;
qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
- err = len;
- done:
+done:
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return err;
}
static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
- struct qlcnic_cmd_args *cmd)
+ struct qlcnic_cmd_args *cmd, u32 cap)
{
- adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff;
- adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+ if (cap & QLC_83XX_PVID_STRIP_CAPABILITY) {
+ adapter->rx_pvid = 0;
+ } else {
+ adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff;
+ adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+ }
return 0;
}
return 0;
}
-static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_cmd_args cmd;
- int ret;
+ int ret, cap;
+ cap = info->capabilities;
ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
if (ret)
return ret;
ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
break;
case QLC_PVID_MODE:
- ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
+ ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd, cap);
break;
}
}
if (err)
return -EIO;
- err = qlcnic_sriov_get_vf_acl(adapter);
+ err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
if (err)
return err;
dev_warn(&adapter->pdev->dev,
"Device does not support MSI interrupts\n");
- err = qlcnic_setup_intr(adapter, 1);
+ err = qlcnic_setup_intr(adapter, 1, 0);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
void *p)
{
struct rtl8169_private *tp = netdev_priv(dev);
-
- if (regs->len > R8169_REGS_SIZE)
- regs->len = R8169_REGS_SIZE;
+ u32 __iomem *data = tp->mmio_addr;
+ u32 *dw = p;
+ int i;
rtl_lock_work(tp);
- memcpy_fromio(p, tp->mmio_addr, regs->len);
+ for (i = 0; i < R8169_REGS_SIZE; i += 4)
+ memcpy_fromio(dw++, data++, 4);
rtl_unlock_work(tp);
}
RTL_W8(Cfg9346, Cfg9346_Unlock);
RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
- RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+ RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
tp->features |= RTL_FEATURE_WOL;
if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_r8a777x,
+ .register_type = SH_ETH_REG_FAST_RCAR,
+
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
.eesipr_value = 0x01ff009f,
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_r8a777x,
+ .register_type = SH_ETH_REG_FAST_RCAR,
+
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
.eesipr_value = 0x01ff009f,
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_sh7724,
+ .register_type = SH_ETH_REG_FAST_SH4,
+
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
.eesipr_value = 0x01ff009f,
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_sh7757,
+ .register_type = SH_ETH_REG_FAST_SH4,
+
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.rmcr_value = 0x00000001,
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_giga,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};
static struct sh_eth_cpu_data sh7619_data = {
+ .register_type = SH_ETH_REG_FAST_SH3_SH2,
+
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.apr = 1,
};
static struct sh_eth_cpu_data sh771x_data = {
+ .register_type = SH_ETH_REG_FAST_SH3_SH2,
+
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tsu = 1,
};
mdp->edmac_endian = pd->edmac_endian;
mdp->no_ether_link = pd->no_ether_link;
mdp->ether_link_active_low = pd->ether_link_active_low;
- mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
/* set cpu data */
mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+ mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
sh_eth_set_default_cpu_data(mdp->cd);
/* set function */
SH_ETH_MAX_REGISTER_OFFSET,
};
+enum {
+ SH_ETH_REG_GIGABIT,
+ SH_ETH_REG_FAST_RCAR,
+ SH_ETH_REG_FAST_SH4,
+ SH_ETH_REG_FAST_SH3_SH2
+};
+
/* Driver's parameters */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
#define SH4_SKB_RX_ALIGN 32
void (*set_rate)(struct net_device *ndev);
/* mandatory initialize value */
+ int register_type;
unsigned long eesipr_value;
/* optional initialize value */
struct sis190_private *tp = netdev_priv(dev);
unsigned long flags;
- if (regs->len > SIS190_REGS_SIZE)
- regs->len = SIS190_REGS_SIZE;
-
spin_lock_irqsave(&tp->lock, flags);
memcpy_fromio(p, tp->mmio_addr, regs->len);
spin_unlock_irqrestore(&tp->lock, flags);
struct net_device *dev;
struct device *device;
struct mac_device_info *hw;
- int no_csum_insertion;
spinlock_t lock;
struct phy_device *phydev ____cacheline_aligned_in_smp;
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
- if (likely(priv->plat->force_sf_dma_mode ||
- ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
+ if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
/*
* In case of GMAC, SF mode can be enabled
* to perform the TX COE in HW. This depends on:
static int bigmac_sbus_remove(struct platform_device *op)
{
- struct bigmac *bp = dev_get_drvdata(&op->dev);
+ struct bigmac *bp = platform_get_drvdata(op);
struct device *parent = op->dev.parent;
struct net_device *net_dev = bp->dev;
struct platform_device *qec_op;
free_netdev(net_dev);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
static int hme_sbus_remove(struct platform_device *op)
{
- struct happy_meal *hp = dev_get_drvdata(&op->dev);
+ struct happy_meal *hp = platform_get_drvdata(op);
struct net_device *net_dev = hp->dev;
unregister_netdev(net_dev);
free_netdev(net_dev);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
#include <linux/of_device.h>
#include <linux/if_vlan.h>
-#include <linux/platform_data/cpsw.h>
#include <linux/pinctrl/consumer.h>
+#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpts.h"
#include "davinci_cpdma.h"
return -EOPNOTSUPP;
}
+static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (priv->slaves[slave_no].phy)
+ phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol);
+}
+
+static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ if (priv->slaves[slave_no].phy)
+ return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol);
+ else
+ return -EOPNOTSUPP;
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
.get_sset_count = cpsw_get_sset_count,
.get_strings = cpsw_get_strings,
.get_ethtool_stats = cpsw_get_ethtool_stats,
+ .get_wol = cpsw_get_wol,
+ .set_wol = cpsw_set_wol,
};
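
Since cpsw_get_wol()/cpsw_set_wol() just forward to the slave PHY, the usual ethtool plumbing applies unchanged. A hedged userspace sketch querying WoL over the classic SIOCETHTOOL ioctl (the interface name "eth0" is an assumption):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)&wol;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("supported=0x%x wolopts=0x%x\n",
		       wol.supported, wol.wolopts);
	close(fd);
	return 0;
}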
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
-/*
- * Texas Instruments Ethernet Switch Driver
+/* Texas Instruments Ethernet Switch Driver
*
- * Copyright (C) 2012 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments
*
* This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
int phy_if;
u8 mac_addr[ETH_ALEN];
u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
-
};
struct cpsw_platform_data {
+ struct cpsw_slave_data *slave_data;
u32 ss_reg_ofs; /* Subsystem control register offset */
u32 channels; /* number of cpdma channels (symmetric) */
u32 slaves; /* number of slave cpgmac ports */
- struct cpsw_slave_data *slave_data;
u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */
u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
int cpdma_chan_dump(struct cpdma_chan *chan)
{
static int davinci_mdio_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ struct davinci_mdio_data *data = platform_get_drvdata(pdev);
if (data->bus) {
mdiobus_unregister(data->bus);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- dev_set_drvdata(dev, NULL);
-
kfree(data);
return 0;
*/
static int xemaclite_of_remove(struct platform_device *of_dev)
{
- struct device *dev = &of_dev->dev;
- struct net_device *ndev = dev_get_drvdata(dev);
+ struct net_device *ndev = platform_get_drvdata(of_dev);
struct net_local *lp = netdev_priv(ndev);
lp->phy_node = NULL;
xemaclite_remove_ndev(ndev, of_dev);
- dev_set_drvdata(dev, NULL);
return 0;
}
err = register_netdev(dev);
if (err == 0)
- dev_set_drvdata(&pdev->dev, dev);
+ platform_set_drvdata(pdev, dev);
if (err) {
if (si->pdata->shutdown)
pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
pci_write_config_byte(pcidev,0x5a,0xc0);
WriteLPCReg(0x28, 0x70 );
- if (via_ircc_open(pcidev, &info, 0x3076) == 0)
- rc=0;
+ rc = via_ircc_open(pcidev, &info, 0x3076);
} else
rc = -ENODEV; //IR not turn on
} else { //Not VT1211
info.irq=FirIRQ;
info.dma=FirDRQ1;
info.dma2=FirDRQ0;
- if (via_ircc_open(pcidev, &info, 0x3096) == 0)
- rc=0;
+ rc = via_ircc_open(pcidev, &info, 0x3096);
} else
rc = -ENODEV; //IR not turn on !!!!!
}//Not VT1211
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+
/*
* RCU usage:
* The macvtap_queue and the macvlan_dev are loosely coupled, the
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvtap_queue *q = macvtap_get_queue(dev, skb);
- netdev_features_t features;
+ netdev_features_t features = TAP_FEATURES;
+
if (!q)
goto drop;
skb->dev = dev;
/* Apply the forward feature mask so that we perform segmentation
- * according to users wishes.
+ * according to the user's wishes. This only works if VNET_HDR is
+ * enabled.
*/
- features = netif_skb_features(skb) & vlan->tap_features;
+ if (q->flags & IFF_VNET_HDR)
+ features |= vlan->tap_features;
if (netif_needs_gso(skb, features)) {
struct sk_buff *segs = __skb_gso_segment(skb, features, false);
/* tap_features are the same as features on tun/tap and
* reflect user expectations.
*/
- vlan->tap_features = vlan->dev->features &
- (feature_mask | ~TUN_OFFLOADS);
+ vlan->tap_features = feature_mask;
vlan->set_features = features;
netdev_update_features(vlan->dev);
TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
- /* TODO: only accept frames with the features that
- got enabled for forwarded frames */
- if (!(q->flags & IFF_VNET_HDR))
- return -EINVAL;
rtnl_lock();
ret = set_offload(q, arg);
rtnl_unlock();
bus->mii_bus->read = octeon_mdiobus_read;
bus->mii_bus->write = octeon_mdiobus_write;
- dev_set_drvdata(&pdev->dev, bus);
+ platform_set_drvdata(pdev, bus);
err = of_mdiobus_register(bus->mii_bus, pdev->dev.of_node);
if (err)
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/micrel_phy.h>
+#include <linux/of.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
+/* Write/read to/from extended registers */
+#define MII_KSZPHY_EXTREG 0x0b
+#define KSZPHY_EXTREG_WRITE 0x8000
+
+#define MII_KSZPHY_EXTREG_WRITE 0x0c
+#define MII_KSZPHY_EXTREG_READ 0x0d
+
+/* Extended registers */
+#define MII_KSZPHY_CLK_CONTROL_PAD_SKEW 0x104
+#define MII_KSZPHY_RX_DATA_PAD_SKEW 0x105
+#define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106
+
+#define PS_TO_REG 200
+
static int ksz_config_flags(struct phy_device *phydev)
{
int regval;
return 0;
}
+static int kszphy_extended_write(struct phy_device *phydev,
+ u32 regnum, u16 val)
+{
+ phy_write(phydev, MII_KSZPHY_EXTREG, KSZPHY_EXTREG_WRITE | regnum);
+ return phy_write(phydev, MII_KSZPHY_EXTREG_WRITE, val);
+}
+
+static int kszphy_extended_read(struct phy_device *phydev,
+ u32 regnum)
+{
+ phy_write(phydev, MII_KSZPHY_EXTREG, regnum);
+ return phy_read(phydev, MII_KSZPHY_EXTREG_READ);
+}
+
static int kszphy_ack_interrupt(struct phy_device *phydev)
{
/* bit[7..0] int status, which is a read and clear register. */
return rc < 0 ? rc : 0;
}
+static int ksz9021_load_values_from_of(struct phy_device *phydev,
+ struct device_node *of_node, u16 reg,
+ char *field1, char *field2,
+ char *field3, char *field4)
+{
+ int val1 = -1;
+ int val2 = -2;
+ int val3 = -3;
+ int val4 = -4;
+ int newval;
+ int matches = 0;
+
+ if (!of_property_read_u32(of_node, field1, &val1))
+ matches++;
+
+ if (!of_property_read_u32(of_node, field2, &val2))
+ matches++;
+
+ if (!of_property_read_u32(of_node, field3, &val3))
+ matches++;
+
+ if (!of_property_read_u32(of_node, field4, &val4))
+ matches++;
+
+ if (!matches)
+ return 0;
+
+ if (matches < 4)
+ newval = kszphy_extended_read(phydev, reg);
+ else
+ newval = 0;
+
+ if (val1 != -1)
+ newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0);
+
+ if (val2 != -2)
+ newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4);
+
+ if (val3 != -3)
+ newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8);
+
+ if (val4 != -4)
+ newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12);
+
+ return kszphy_extended_write(phydev, reg, newval);
+}
+
+static int ksz9021_config_init(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct device_node *of_node = dev->of_node;
+
+ if (!of_node && dev->parent->of_node)
+ of_node = dev->parent->of_node;
+
+ if (of_node) {
+ ksz9021_load_values_from_of(phydev, of_node,
+ MII_KSZPHY_CLK_CONTROL_PAD_SKEW,
+ "txen-skew-ps", "txc-skew-ps",
+ "rxdv-skew-ps", "rxc-skew-ps");
+ ksz9021_load_values_from_of(phydev, of_node,
+ MII_KSZPHY_RX_DATA_PAD_SKEW,
+ "rxd0-skew-ps", "rxd1-skew-ps",
+ "rxd2-skew-ps", "rxd3-skew-ps");
+ ksz9021_load_values_from_of(phydev, of_node,
+ MII_KSZPHY_TX_DATA_PAD_SKEW,
+ "txd0-skew-ps", "txd1-skew-ps",
+ "txd2-skew-ps", "txd3-skew-ps");
+ }
+ return 0;
+}
+
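A quick check of the encoding above: each *-skew-ps property is divided by PS_TO_REG (200 ps per step) and packed into one 4-bit field of a 16-bit pad-skew register, so the documented maximum of 3000 ps maps to 0xf. A standalone sketch (pack_skew() is an illustrative helper, not driver code):

#include <stdio.h>
#include <stdint.h>

#define PS_TO_REG 200	/* 200 ps per register step, as in the driver */

/* Pack four skews (in ps) into one pad-skew register, mirroring the
 * nibble layout written by ksz9021_load_values_from_of(). */
static uint16_t pack_skew(int ps0, int ps1, int ps2, int ps3)
{
	return ((ps0 / PS_TO_REG) & 0xf) << 0 |
	       ((ps1 / PS_TO_REG) & 0xf) << 4 |
	       ((ps2 / PS_TO_REG) & 0xf) << 8 |
	       ((ps3 / PS_TO_REG) & 0xf) << 12;
}

int main(void)
{
	/* e.g. txc-skew-ps = 3000 and rxc-skew-ps = 3000, others 0 */
	printf("0x%04x\n", pack_skew(0, 3000, 0, 3000));	/* 0xf0f0 */
	return 0;
}
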
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6)
#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4)
.name = "Micrel KSZ9021 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = ksz9021_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
#define RTL821x_INER_INIT 0x6400
#define RTL821x_INSR 0x13
-#define RTL8211E_INER_LINK_STAT 0x10
+#define RTL8211E_INER_LINK_STATUS 0x400
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, RTL821x_INER,
- RTL8211E_INER_LINK_STAT);
+ RTL8211E_INER_LINK_STATUS);
else
err = phy_write(phydev, RTL821x_INER, 0);
struct fasync_struct *fasync;
/* only used for fasync */
unsigned int flags;
- u16 queue_index;
+ union {
+ u16 queue_index;
+ unsigned int ifindex;
+ };
struct list_head next;
struct tun_struct *detached;
};
module_put(THIS_MODULE);
}
-static int tun_attach(struct tun_struct *tun, struct file *file)
+static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
{
struct tun_file *tfile = file->private_data;
int err;
err = 0;
/* Re-attach the filter to the persistent device */
- if (tun->filter_attached == true) {
+ if (!skip_filter && (tun->filter_attached == true)) {
err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
if (!err)
goto out;
if (err < 0)
return err;
- err = tun_attach(tun, file);
+ err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
if (err < 0)
return err;
dev_net_set(dev, net);
dev->rtnl_link_ops = &tun_link_ops;
+ dev->ifindex = tfile->ifindex;
tun = netdev_priv(dev);
tun->dev = dev;
dev->vlan_features = dev->features;
INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file);
+ err = tun_attach(tun, file, false);
if (err < 0)
goto err_free_dev;
ret = security_tun_dev_attach_queue(tun->security);
if (ret < 0)
goto unlock;
- ret = tun_attach(tun, file);
+ ret = tun_attach(tun, file, false);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
kgid_t group;
int sndbuf;
int vnet_hdr_sz;
+ unsigned int ifindex;
int ret;
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
ret = -EFAULT;
goto unlock;
}
+ if (cmd == TUNSETIFINDEX) {
+ ret = -EPERM;
+ if (tun)
+ goto unlock;
+
+ ret = -EFAULT;
+ if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
+ goto unlock;
+
+ ret = 0;
+ tfile->ifindex = ifindex;
+ goto unlock;
+ }
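
The ordering matters here: TUNSETIFINDEX is only accepted while no device is attached (the -EPERM branch above), so it must precede TUNSETIFF. A hedged userspace sketch of the checkpoint/restore-style usage, assuming the TUNSETIFINDEX definition this series adds to <linux/if_tun.h>; the ifindex and device name are made up:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
	unsigned int ifindex = 42;	/* hypothetical saved ifindex */
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return 1;
	/* Must come before TUNSETIFF, while no device is attached. */
	if (ioctl(fd, TUNSETIFINDEX, &ifindex) < 0)
		perror("TUNSETIFINDEX");

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "tun42", IFNAMSIZ - 1);
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
		perror("TUNSETIFF");
	close(fd);
	return 0;
}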
ret = -EBADFD;
if (!tun)
case TUNGETIFF:
tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+ if (tfile->detached)
+ ifr.ifr_flags |= IFF_DETACH_QUEUE;
+ if (!tfile->socket.sk->sk_filter)
+ ifr.ifr_flags |= IFF_NOFILTER;
+
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
break;
tun_detach_filter(tun, tun->numqueues);
break;
+ case TUNGETFILTER:
+ ret = -EINVAL;
+ if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ break;
+ ret = -EFAULT;
+ if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
+ break;
+ ret = 0;
+ break;
+
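Companion fragment for the TUNGETFILTER case above: fetch the struct sock_fprog that was attached earlier with TUNATTACHFILTER. Here fd is assumed to be a tun fd with a tap device attached (a fragment, not a full program):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/filter.h>	/* struct sock_fprog */
#include <linux/if_tun.h>

static void dump_filter_len(int fd)
{
	struct sock_fprog fprog;

	/* Only valid on tap devices, per the TUN_TAP_DEV check above. */
	if (ioctl(fd, TUNGETFILTER, &fprog) == 0)
		printf("attached filter: %hu instructions\n", fprog.len);
}
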
default:
ret = -EINVAL;
break;
rcu_assign_pointer(tfile->tun, NULL);
tfile->net = get_net(current->nsproxy->net_ns);
tfile->flags = 0;
+ tfile->ifindex = 0;
rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
init_waitqueue_head(&tfile->wq.wait);
static int hso_get_config_data(struct usb_interface *interface)
{
struct usb_device *usbdev = interface_to_usbdev(interface);
- u8 config_data[17];
+ u8 *config_data = kmalloc(17, GFP_KERNEL);
u32 if_num = interface->altsetting->desc.bInterfaceNumber;
s32 result;
+ if (!config_data)
+ return -ENOMEM;
if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
0x86, 0xC0, 0, 0, config_data, 17,
USB_CTRL_SET_TIMEOUT) != 0x11) {
+ kfree(config_data);
return -EIO;
}
if (config_data[16] & 0x1)
result |= HSO_INFO_CRC_BUG;
+ kfree(config_data);
return result;
}
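
The hso change above follows a general USB rule: buffers passed to usb_control_msg() must be DMA-able, which in practice means kmalloc()'d, never on the stack. A kernel-style fragment of the pattern (read_vendor_config() and its request values are illustrative, not hso code):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int read_vendor_config(struct usb_device *udev, u8 *out, int len)
{
	u8 *buf = kmalloc(len, GFP_KERNEL);	/* heap => DMA-safe */
	int ret;

	if (!buf)
		return -ENOMEM;
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x86, 0xC0, 0, 0, buf, len,
			      USB_CTRL_GET_TIMEOUT);
	if (ret == len)
		memcpy(out, buf, len);
	kfree(buf);
	return ret == len ? 0 : -EIO;
}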
struct hso_shared_int *shared_int;
struct hso_device *tmp_dev = NULL;
+ if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
+ dev_err(&interface->dev, "Not our interface\n");
+ return -ENODEV;
+ }
+
if_num = interface->altsetting->desc.bInterfaceNumber;
/* Get the interface/port specification from either driver_info or from
else
port_spec = hso_get_config_data(interface);
- if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
- dev_err(&interface->dev, "Not our interface\n");
- return -ENODEV;
- }
/* Check if we need to switch to alt interfaces prior to port
* configuration */
if (interface->num_altsetting > 1)
return 0;
}
-static void vxlan_fdb_free_rdst(struct rcu_head *head)
-{
- struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
- kfree(rd);
-}
-
static void vxlan_fdb_free(struct rcu_head *head)
{
struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
*/
if (rd && !list_is_singular(&f->remotes)) {
list_del_rcu(&rd->list);
- call_rcu(&rd->rcu, vxlan_fdb_free_rdst);
+ kfree_rcu(rd, rcu);
goto out;
}
data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
- memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
+ memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
data->flags = 1; /* has quality information */
- memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
+ memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
sizeof(struct iw_quality) * data->length);
kfree(addr);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ return;
+
+ if (ctx->vif)
ieee80211_chswitch_done(ctx->vif, is_success);
}
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
-#define APMG_RTC_INT_STT_RFKILL (0x10000000)
-
/* Device system time */
#define DEVICE_SYSTEM_TIME_REG 0xA0206C
schedule_work(&mvm->roc_done_wk);
}
+static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const char *errmsg)
+{
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return false;
+ if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
+ return false;
+ if (errmsg)
+ IWL_ERR(mvm, "%s\n", errmsg);
+ ieee80211_connection_loss(vif);
+ return true;
+}
+
/*
* Handles a FW notification for an event that is known to the driver.
*
* P2P Device discoverability, while there are other higher priority
* events in the system).
*/
- WARN_ONCE(!le32_to_cpu(notif->status),
- "Failed to schedule time event\n");
+ if (WARN_ONCE(!le32_to_cpu(notif->status),
+ "Failed to schedule time event\n")) {
+ if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
+ iwl_mvm_te_clear_data(mvm, te_data);
+ return;
+ }
+ }
if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
IWL_DEBUG_TE(mvm,
* By now, we should have finished association
* and know the dtim period.
*/
- if (te_data->vif->type == NL80211_IFTYPE_STATION &&
- (!te_data->vif->bss_conf.assoc ||
- !te_data->vif->bss_conf.dtim_period)) {
- IWL_ERR(mvm,
- "No assocation and the time event is over already...\n");
- ieee80211_connection_loss(te_data->vif);
- }
-
+ iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+ "No assocation and the time event is over already...");
iwl_mvm_te_clear_data(mvm, te_data);
} else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
te_data->running = true;
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
if (hw_rfkill) {
- /*
- * Clear the interrupt in APMG if the NIC is going down.
- * Note that when the NIC exits RFkill (else branch), we
- * can't access prph and the NIC will be reset in
- * start_hw anyway.
- */
- iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
- APMG_RTC_INT_STT_RFKILL);
set_bit(STATUS_RFKILL, &trans_pcie->status);
if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
&trans_pcie->status))
goto out_no_pci;
}
+ /* W/A - seems to solve weird behavior. We need to remove this if we
+ * don't want to stay in L1 all the time. This wastes a lot of power */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
goto exit;
err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
- USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
+ USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
if (err < 0)
goto exit;
+ memcpy(&ret, buf, sizeof(ret));
+
if (ret & 0x80) {
err = -EIO;
goto exit;
mem = (unsigned long)
dt_alloc(size + 4, __alignof__(struct device_node));
+ memset((void *)mem, 0, size);
+
((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
pr_debug(" unflattening %lx...\n", mem);
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct sunxi_pinctrl_group *g = &pctl->groups[group];
+ unsigned long flags;
u32 val, mask;
u16 strength;
u8 dlevel;
* 3: 40mA
*/
dlevel = strength / 10 - 1;
+
+ spin_lock_irqsave(&pctl->lock, flags);
+
val = readl(pctl->membase + sunxi_dlevel_reg(g->pin));
mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin);
writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin),
pctl->membase + sunxi_dlevel_reg(g->pin));
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
break;
case PIN_CONFIG_BIAS_PULL_UP:
+ spin_lock_irqsave(&pctl->lock, flags);
+
val = readl(pctl->membase + sunxi_pull_reg(g->pin));
mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin),
pctl->membase + sunxi_pull_reg(g->pin));
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
+ spin_lock_irqsave(&pctl->lock, flags);
+
val = readl(pctl->membase + sunxi_pull_reg(g->pin));
mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin),
pctl->membase + sunxi_pull_reg(g->pin));
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
break;
default:
break;
u8 config)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned long flags;
+ u32 val, mask;
+
+ spin_lock_irqsave(&pctl->lock, flags);
- u32 val = readl(pctl->membase + sunxi_mux_reg(pin));
- u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
+ val = readl(pctl->membase + sunxi_mux_reg(pin));
+ mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
writel((val & ~mask) | config << sunxi_mux_offset(pin),
pctl->membase + sunxi_mux_reg(pin));
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static int sunxi_pmx_enable(struct pinctrl_dev *pctldev,
struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
u32 reg = sunxi_data_reg(offset);
u8 index = sunxi_data_offset(offset);
+ unsigned long flags;
+ u32 regval;
+
+ spin_lock_irqsave(&pctl->lock, flags);
+
+ regval = readl(pctl->membase + reg);
- writel((value & DATA_PINS_MASK) << index, pctl->membase + reg);
+ if (value)
+ regval |= BIT(index);
+ else
+ regval &= ~(BIT(index));
+
+ writel(regval, pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
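
All of the sunxi hunks above share one shape: a read-modify-write of a shared register must be serialized, or two concurrent writers silently drop each other's bits. A userspace model of the locked version (a plain variable stands in for the MMIO register):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t fake_reg;	/* stands in for the MMIO register */

static void set_bit_locked(unsigned int index, int value)
{
	pthread_mutex_lock(&lock);
	if (value)
		fake_reg |= 1u << index;	/* the readl()/writel() pair */
	else
		fake_reg &= ~(1u << index);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	set_bit_locked(3, 1);
	set_bit_locked(5, 1);
	set_bit_locked(3, 0);
	printf("reg=0x%08x\n", fake_reg);	/* 0x00000020 */
	return 0;
}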
static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
u32 reg = sunxi_irq_cfg_reg(d->hwirq);
u8 index = sunxi_irq_cfg_offset(d->hwirq);
+ unsigned long flags;
+ u32 regval;
u8 mode;
switch (type) {
return -EINVAL;
}
- writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg);
+ spin_lock_irqsave(&pctl->lock, flags);
+
+ regval = readl(pctl->membase + reg);
+ regval &= ~IRQ_CFG_IRQ_MASK;
+ writel(regval | (mode << index), pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
return 0;
}
u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq);
u32 status_reg = sunxi_irq_status_reg(d->hwirq);
u8 status_idx = sunxi_irq_status_offset(d->hwirq);
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&pctl->lock, flags);
+
/* Mask the IRQ */
val = readl(pctl->membase + ctrl_reg);
writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg);
/* Clear the IRQ */
writel(1 << status_idx, pctl->membase + status_reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static void sunxi_pinctrl_irq_mask(struct irq_data *d)
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&pctl->lock, flags);
+
/* Mask the IRQ */
val = readl(pctl->membase + reg);
writel(val & ~(1 << idx), pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
struct sunxi_desc_function *func;
u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+ unsigned long flags;
u32 val;
func = sunxi_pinctrl_desc_find_function_by_pin(pctl,
/* Change muxing to INT mode */
sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval);
+ spin_lock_irqsave(&pctl->lock, flags);
+
/* Unmask the IRQ */
val = readl(pctl->membase + reg);
writel(val | (1 << idx), pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static struct irq_chip sunxi_pinctrl_irq_chip = {
return -ENOMEM;
platform_set_drvdata(pdev, pctl);
+ spin_lock_init(&pctl->lock);
+
pctl->membase = of_iomap(node, 0);
if (!pctl->membase)
return -ENOMEM;
#define __PINCTRL_SUNXI_H
#include <linux/kernel.h>
+#include <linux/spinlock.h>
#define PA_BASE 0
#define PB_BASE 32
unsigned ngroups;
int irq;
int irq_array[SUNXI_IRQ_NUMBER];
+ spinlock_t lock;
struct pinctrl_dev *pctl_dev;
};
return platform_driver_register(&olpc_ec_plat_driver);
}
-module_init(olpc_ec_init_module);
+arch_initcall(olpc_ec_init_module);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_LICENSE("GPL");
#define HPWMI_ALS_QUERY 0x3
#define HPWMI_HARDWARE_QUERY 0x4
#define HPWMI_WIRELESS_QUERY 0x5
-#define HPWMI_BIOS_QUERY 0x9
#define HPWMI_HOTKEY_QUERY 0xc
#define HPWMI_WIRELESS2_QUERY 0x1b
#define HPWMI_POSTCODEERROR_QUERY 0x2a
return (state & 0x4) ? 1 : 0;
}
-static int hp_wmi_enable_hotkeys(void)
-{
- int ret;
- int query = 0x6e;
-
- ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
- 0);
-
- if (ret)
- return -EINVAL;
- return 0;
-}
-
static int hp_wmi_set_block(void *data, bool blocked)
{
enum hp_wmi_radio r = (enum hp_wmi_radio) data;
err = hp_wmi_input_setup();
if (err)
return err;
-
- hp_wmi_enable_hotkeys();
}
if (bios_capable) {
if (pos < 0)
return pos;
- return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina");
+ return snprintf(buffer, PAGE_SIZE, "%s\n",
+ pos == SPEED ? "speed" :
+ pos == STAMINA ? "stamina" :
+ pos == AUTO ? "auto" : "unknown");
}
static int sony_nc_gfx_switch_setup(struct platform_device *pd,
goto err_free_resources;
}
- if (sonypi_compat_init())
+ result = sonypi_compat_init();
+ if (result)
goto err_remove_input;
/* request io port */
#include <linux/eventfd.h>
#include <linux/vhost.h>
-#include <linux/socket.h> /* memcpy_fromiovec */
+#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
for_each_possible_cpu(i)
memset(per_cpu(cpu_evtchn_mask, i),
- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+ (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
}
static inline void clear_evtchn(int port)
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
+ struct shared_info *s = HYPERVISOR_shared_info;
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
+ int masked;
if (!VALID_EVTCHN(evtchn))
return -1;
bind_vcpu.port = evtchn;
bind_vcpu.vcpu = tcpu;
+ /*
+ * Mask the event while changing the VCPU binding to prevent
+ * it being delivered on an unexpected VCPU.
+ */
+ masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+
/*
* If this fails, it usually just indicates that we're dealing with a
* virq or IPI channel, which don't actually need to be rebound. Ignore
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
bind_evtchn_to_cpu(evtchn, tcpu);
+ if (!masked)
+ unmask_evtchn(evtchn);
+
return 0;
}
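
The masking added above is the save-and-restore idiom: record whether the event channel was already masked, and unmask afterwards only if we were the ones who masked it. A minimal userspace model using an atomic exchange in place of sync_test_and_set_bit():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int mask;		/* stands in for the evtchn mask bit */

static void risky_rebind(void)
{
	/* e.g. EVTCHNOP_bind_vcpu: must not race with delivery */
}

int main(void)
{
	int was_masked = atomic_exchange(&mask, 1);	/* test-and-set */

	risky_rebind();
	if (!was_masked)
		atomic_store(&mask, 0);	/* unmask only if we masked */
	printf("mask=%d\n", atomic_load(&mask));	/* 0 */
	return 0;
}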
extern void ext4_dirty_inode(struct inode *, int);
extern int ext4_change_inode_journal_flag(struct inode *, int);
extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
+extern int ext4_inode_attach_jinode(struct inode *inode);
extern int ext4_can_truncate(struct inode *inode);
extern void ext4_truncate(struct inode *);
extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
set_buffer_prio(bh);
if (ext4_handle_valid(handle)) {
err = jbd2_journal_dirty_metadata(handle, bh);
- if (err) {
- /* Errors can only happen if there is a bug */
- handle->h_err = err;
- __ext4_journal_stop(where, line, handle);
+ /* Errors can only happen if there is a bug */
+ if (WARN_ON_ONCE(err)) {
+ ext4_journal_abort_handle(where, line, __func__, bh,
+ handle, err);
}
} else {
if (inode)
{
struct super_block *sb = inode->i_sb;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- struct ext4_inode_info *ei = EXT4_I(inode);
struct vfsmount *mnt = filp->f_path.mnt;
struct path path;
char buf[64], *cp;
* Set up the jbd2_inode if we are opening the inode for
* writing and the journal is present
*/
- if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
- struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);
-
- spin_lock(&inode->i_lock);
- if (!ei->jinode) {
- if (!jinode) {
- spin_unlock(&inode->i_lock);
- return -ENOMEM;
- }
- ei->jinode = jinode;
- jbd2_journal_init_jbd_inode(ei->jinode, inode);
- jinode = NULL;
- }
- spin_unlock(&inode->i_lock);
- if (unlikely(jinode != NULL))
- jbd2_free_inode(jinode);
+ if (filp->f_mode & FMODE_WRITE) {
+ int ret = ext4_inode_attach_jinode(inode);
+ if (ret < 0)
+ return ret;
}
return dquot_file_open(inode, filp);
}
offset;
}
+ if (offset & (sb->s_blocksize - 1) ||
+ (offset + length) & (sb->s_blocksize - 1)) {
+ /*
+ * Attach jinode to inode for jbd2 if we do any zeroing of
+ * a partial block
+ */
+ ret = ext4_inode_attach_jinode(inode);
+ if (ret < 0)
+ goto out_mutex;
+
+ }
+
first_block_offset = round_up(offset, sb->s_blocksize);
last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
return ret;
}
+int ext4_inode_attach_jinode(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct jbd2_inode *jinode;
+
+ if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
+ return 0;
+
+ jinode = jbd2_alloc_inode(GFP_KERNEL);
+ spin_lock(&inode->i_lock);
+ if (!ei->jinode) {
+ if (!jinode) {
+ spin_unlock(&inode->i_lock);
+ return -ENOMEM;
+ }
+ ei->jinode = jinode;
+ jbd2_journal_init_jbd_inode(ei->jinode, inode);
+ jinode = NULL;
+ }
+ spin_unlock(&inode->i_lock);
+ if (unlikely(jinode != NULL))
+ jbd2_free_inode(jinode);
+ return 0;
+}
+
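ext4_inode_attach_jinode() is a textbook instance of optimistic allocation: allocate (which may sleep) before taking the spinlock, re-check the field under the lock, and free the unused copy if another task won the race. The same shape in a self-contained pthreads sketch:

#include <pthread.h>
#include <stdlib.h>

struct state {
	pthread_mutex_t lock;
	void *lazy;		/* set once, by whichever caller wins */
};

static int attach_lazy(struct state *s)
{
	void *new = malloc(64);	/* may sleep/fail: do it outside the lock */

	pthread_mutex_lock(&s->lock);
	if (!s->lazy) {
		if (!new) {
			pthread_mutex_unlock(&s->lock);
			return -1;	/* -ENOMEM in the kernel version */
		}
		s->lazy = new;
		new = NULL;
	}
	pthread_mutex_unlock(&s->lock);
	free(new);		/* lost the race: drop the spare copy */
	return 0;
}

int main(void)
{
	struct state s = { PTHREAD_MUTEX_INITIALIZER, NULL };

	return attach_lazy(&s) || attach_lazy(&s);	/* 2nd call is a no-op */
}
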
/*
* ext4_truncate()
*
return;
}
+ /* If we zero out the tail of the page, we have to create a jinode for jbd2 */
+ if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
+ if (ext4_inode_attach_jinode(inode) < 0)
+ return;
+ }
+
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
else
glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
WQ_HIGHPRI | WQ_FREEZABLE, 0);
- if (IS_ERR(glock_workqueue))
- return PTR_ERR(glock_workqueue);
+ if (!glock_workqueue)
+ return -ENOMEM;
gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
WQ_MEM_RECLAIM | WQ_FREEZABLE,
0);
- if (IS_ERR(gfs2_delete_workqueue)) {
+ if (!gfs2_delete_workqueue) {
destroy_workqueue(glock_workqueue);
- return PTR_ERR(gfs2_delete_workqueue);
+ return -ENOMEM;
}
register_shrinker(&glock_shrinker);
* None of the buffers should be dirty, locked, or pinned.
*/
-static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
+static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+ unsigned int nr_revokes)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
struct list_head *head = &gl->gl_ail_list;
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_ail_lock);
- list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) {
+ list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
+ if (nr_revokes == 0)
+ break;
bh = bd->bd_bh;
if (bh->b_state & b_state) {
if (fsync)
gfs2_ail_error(gl, bh);
}
gfs2_trans_add_revoke(sdp, bd);
+ nr_revokes--;
}
GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
spin_unlock(&sdp->sd_ail_lock);
WARN_ON_ONCE(current->journal_info);
current->journal_info = &tr;
- __gfs2_ail_flush(gl, 0);
+ __gfs2_ail_flush(gl, 0, tr.tr_revokes);
gfs2_trans_end(sdp);
gfs2_log_flush(sdp, NULL);
{
struct gfs2_sbd *sdp = gl->gl_sbd;
unsigned int revokes = atomic_read(&gl->gl_ail_count);
+ unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
int ret;
if (!revokes)
return;
- ret = gfs2_trans_begin(sdp, 0, revokes);
+ while (revokes > max_revokes)
+ max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
+
+ ret = gfs2_trans_begin(sdp, 0, max_revokes);
if (ret)
return;
- __gfs2_ail_flush(gl, fsync);
+ __gfs2_ail_flush(gl, fsync, max_revokes);
gfs2_trans_end(sdp);
gfs2_log_flush(sdp, NULL);
}
}
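
The sizing loop above accounts for the log layout: the first block of a revoke transaction is a gfs2_log_descriptor, and each continuation block only carries a gfs2_meta_header, so capacity grows in unequal steps. A standalone sketch of the arithmetic, with a hypothetical block size and made-up struct sizes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int bsize = 4096;		/* sb_bsize (assumed) */
	unsigned int desc_sz = 64, hdr_sz = 32;	/* hypothetical sizes */
	unsigned int revokes = 3000;		/* pending revokes */
	unsigned int max = (bsize - desc_sz) / sizeof(uint64_t);

	while (revokes > max)
		max += (bsize - hdr_sz) / sizeof(uint64_t);
	printf("reserve room for %u revokes\n", max);	/* >= revokes */
	return 0;
}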
gfs2_glock_dq_uninit(ghs);
if (IS_ERR(d))
- return PTR_RET(d);
+ return PTR_ERR(d);
return error;
} else if (error != -ENOENT) {
goto fail_gunlock;
struct gfs2_holder gh;
int ret;
+ /* For selinux during lookup */
+ if (gfs2_glock_is_locked_by_me(ip->i_gl))
+ return generic_getxattr(dentry, name, data, size);
+
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
ret = gfs2_glock_nq(&gh);
if (ret == 0) {
goto fail_wq;
gfs2_control_wq = alloc_workqueue("gfs2_control",
- WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0);
+ WQ_UNBOUND | WQ_FREEZABLE, 0);
if (!gfs2_control_wq)
goto fail_recovery;
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
- bio_put(bio);
- /* to be detected by submit_seg_bio() */
+ /* to be detected by nilfs_segbuf_submit_bio() */
}
if (!uptodate)
bio->bi_private = segbuf;
bio_get(bio);
submit_bio(mode, bio);
+ segbuf->sb_nbio++;
if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
bio_put(bio);
err = -EOPNOTSUPP;
goto failed;
}
- segbuf->sb_nbio++;
bio_put(bio);
wi->bio = NULL;
de = next;
} while (de);
spin_unlock(&proc_subdir_lock);
- return 0;
+ return 1;
}
int proc_readdir(struct file *file, struct dir_context *ctx)
static int proc_root_readdir(struct file *file, struct dir_context *ctx)
{
if (ctx->pos < FIRST_PROCESS_ENTRY) {
- proc_readdir(file, ctx);
+ int error = proc_readdir(file, ctx);
+ if (unlikely(error <= 0))
+ return error;
ctx->pos = FIRST_PROCESS_ENTRY;
}
#include <linux/spi/spi.h>
-/**
+/*
* struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
* @oscillator_frequency: - oscillator frequency in Hz
- * @irq_flags: - IRQF configuration flags
- * @board_specific_setup: - called before probing the chip (power,reset)
- * @transceiver_enable: - called to power on/off the transceiver
- * @power_enable: - called to power on/off the mcp *and* the
- * transceiver
- *
- * Please note that you should define power_enable or transceiver_enable or
- * none of them. Defining both of them is no use.
- *
*/
struct mcp251x_platform_data {
unsigned long oscillator_frequency;
- unsigned long irq_flags;
- int (*board_specific_setup)(struct spi_device *spi);
- int (*transceiver_enable)(int enable);
- int (*power_enable) (int enable);
};
#endif /* __CAN_PLATFORM_MCP251X_H__ */
#ifndef FS_ENET_PD_H
#define FS_ENET_PD_H
+#include <linux/clk.h>
#include <linux/string.h>
#include <linux/of_mdio.h>
#include <linux/if_ether.h>
int use_rmii; /* use RMII mode */
int has_phy; /* if the network is phy container as well...*/
+
+ struct clk *clk_per; /* 'per' clock for register access */
};
struct fs_mii_fec_platform_info {
u32 irq[32];
#include <linux/bitmap.h>
#include <linux/if.h>
+#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/sysctl.h>
#include <linux/rtnetlink.h>
-enum
-{
- IPV4_DEVCONF_FORWARDING=1,
- IPV4_DEVCONF_MC_FORWARDING,
- IPV4_DEVCONF_PROXY_ARP,
- IPV4_DEVCONF_ACCEPT_REDIRECTS,
- IPV4_DEVCONF_SECURE_REDIRECTS,
- IPV4_DEVCONF_SEND_REDIRECTS,
- IPV4_DEVCONF_SHARED_MEDIA,
- IPV4_DEVCONF_RP_FILTER,
- IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
- IPV4_DEVCONF_BOOTP_RELAY,
- IPV4_DEVCONF_LOG_MARTIANS,
- IPV4_DEVCONF_TAG,
- IPV4_DEVCONF_ARPFILTER,
- IPV4_DEVCONF_MEDIUM_ID,
- IPV4_DEVCONF_FORCE_IGMP_VERSION,
- IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL,
- IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL,
- IPV4_DEVCONF_NOXFRM,
- IPV4_DEVCONF_NOPOLICY,
- IPV4_DEVCONF_ARP_ANNOUNCE,
- IPV4_DEVCONF_ARP_IGNORE,
- IPV4_DEVCONF_PROMOTE_SECONDARIES,
- IPV4_DEVCONF_ARP_ACCEPT,
- IPV4_DEVCONF_ARP_NOTIFY,
- IPV4_DEVCONF_ACCEPT_LOCAL,
- IPV4_DEVCONF_SRC_VMARK,
- IPV4_DEVCONF_PROXY_ARP_PVLAN,
- IPV4_DEVCONF_ROUTE_LOCALNET,
- __IPV4_DEVCONF_MAX
-};
-
-#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
-
struct ipv4_devconf {
void *sysctl;
int data[IPV4_DEVCONF_MAX];
#define IP6SKB_FORWARDED 2
#define IP6SKB_REROUTED 4
#define IP6SKB_ROUTERALERT 8
+#define IP6SKB_FRAGMENTED 16
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
unsigned long pgoff, unsigned long flags);
#endif
unsigned long mmap_base; /* base of mmap area */
+ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
unsigned long task_size; /* size of task vm space */
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu;
-extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
+extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
+extern void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
struct nf_conn;
size_t (*build_size)(const struct nf_conn *ct);
int (*build)(struct sk_buff *skb, struct nf_conn *ct);
int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
+ int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
+ u32 portid, u32 report);
};
extern struct nfq_ct_hook __rcu *nfq_ct_hook;
struct nfq_ct_nat_hook {
void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
- u32 ctinfo, int off);
+ u32 ctinfo, s32 off);
};
extern struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook;
#else
struct user_namespace;
#ifdef CONFIG_MMU
-extern unsigned long mmap_legacy_base(void);
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
#include <linux/if_ether.h>
enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
-enum {
- SH_ETH_REG_GIGABIT,
- SH_ETH_REG_FAST_RCAR,
- SH_ETH_REG_FAST_SH4,
- SH_ETH_REG_FAST_SH3_SH2
-};
struct sh_eth_plat_data {
int phy;
int edmac_endian;
- int register_type;
phy_interface_t phy_interface;
void (*set_mdio_gate)(void *addr);
extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
__be32 mtu);
extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+ u32 mark);
extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
struct netlink_callback;
const struct nf_conntrack_tuple *tuple);
extern int nf_conntrack_hash_check_insert(struct nf_conn *ct);
-extern void nf_ct_delete_from_lists(struct nf_conn *ct);
-extern void nf_ct_dying_timeout(struct nf_conn *ct);
+bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
extern void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
}
/* These are for NAT. Icky. */
-extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
+extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq);
/* Iterate over all conntracks: if iter returns true, it's deleted. */
extern void
-nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data);
+nf_ct_iterate_cleanup(struct net *net,
+ int (*iter)(struct nf_conn *i, void *data),
+ void *data, u32 portid, int report);
extern void nf_conntrack_free(struct nf_conn *ct);
extern struct nf_conn *
nf_conntrack_alloc(struct net *net, u16 zone,
extern const struct nla_policy nf_ct_port_nla_policy[];
#ifdef CONFIG_SYSCTL
-#ifdef DEBUG_INVALID_PACKETS
#define LOG_INVALID(net, proto) \
((net)->ct.sysctl_log_invalid == (proto) || \
(net)->ct.sysctl_log_invalid == IPPROTO_RAW)
#else
-#define LOG_INVALID(net, proto) \
- (((net)->ct.sysctl_log_invalid == (proto) || \
- (net)->ct.sysctl_log_invalid == IPPROTO_RAW) \
- && net_ratelimit())
-#endif
-#else
static inline int LOG_INVALID(struct net *net, int proto) { return 0; }
#endif /* CONFIG_SYSCTL */
u_int32_t correction_pos;
/* sequence number offset before and after last modification */
- int16_t offset_before, offset_after;
+ int32_t offset_before, offset_after;
};
#include <linux/list.h>
extern void nf_nat_set_seq_adjust(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
- __be32 seq, s16 off);
+ __be32 seq, s32 off);
extern int nf_nat_seq_adjust(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
extern void nf_nat_follow_master(struct nf_conn *ct,
struct nf_conntrack_expect *this);
-extern s16 nf_nat_get_offset(const struct nf_conn *ct,
+extern s32 nf_nat_get_offset(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq);
extern void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
- u32 dir, int off);
+ u32 dir, s32 off);
#endif
+++ /dev/null
-#ifndef _NF_TPROXY_CORE_H
-#define _NF_TPROXY_CORE_H
-
-#include <linux/types.h>
-#include <linux/in.h>
-#include <linux/skbuff.h>
-#include <net/sock.h>
-#include <net/inet_hashtables.h>
-#include <net/inet6_hashtables.h>
-#include <net/tcp.h>
-
-#define NFT_LOOKUP_ANY 0
-#define NFT_LOOKUP_LISTENER 1
-#define NFT_LOOKUP_ESTABLISHED 2
-
-/* look up and get a reference to a matching socket */
-
-
-/* This function is used by the 'TPROXY' target and the 'socket'
- * match. The following lookups are supported:
- *
- * Explicit TProxy target rule
- * ===========================
- *
- * This is used when the user wants to intercept a connection matching
- * an explicit iptables rule. In this case the sockets are assumed
- * matching in preference order:
- *
- * - match: if there's a fully established connection matching the
- * _packet_ tuple, it is returned, assuming the redirection
- * already took place and we process a packet belonging to an
- * established connection
- *
- * - match: if there's a listening socket matching the redirection
- * (e.g. on-port & on-ip of the connection), it is returned,
- * regardless if it was bound to 0.0.0.0 or an explicit
- * address. The reasoning is that if there's an explicit rule, it
- * does not really matter if the listener is bound to an interface
- * or to 0. The user already stated that he wants redirection
- * (since he added the rule).
- *
- * "socket" match based redirection (no specific rule)
- * ===================================================
- *
- * There are connections with dynamic endpoints (e.g. FTP data
- * connection) that the user is unable to add explicit rules
- * for. These are taken care of by a generic "socket" rule. It is
- * assumed that the proxy application is trusted to open such
- * connections without explicit iptables rule (except of course the
- * generic 'socket' rule). In this case the following sockets are
- * matched in preference order:
- *
- * - match: if there's a fully established connection matching the
- * _packet_ tuple
- *
- * - match: if there's a non-zero bound listener (possibly with a
- * non-local address) We don't accept zero-bound listeners, since
- * then local services could intercept traffic going through the
- * box.
- *
- * Please note that there's an overlap between what a TPROXY target
- * and a socket match will match. Normally if you have both rules the
- * "socket" match will be the first one, effectively all packets
- * belonging to established connections going through that one.
- */
-static inline struct sock *
-nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
- const __be32 saddr, const __be32 daddr,
- const __be16 sport, const __be16 dport,
- const struct net_device *in, int lookup_type)
-{
- struct sock *sk;
-
- /* look up socket */
- switch (protocol) {
- case IPPROTO_TCP:
- switch (lookup_type) {
- case NFT_LOOKUP_ANY:
- sk = __inet_lookup(net, &tcp_hashinfo,
- saddr, sport, daddr, dport,
- in->ifindex);
- break;
- case NFT_LOOKUP_LISTENER:
- sk = inet_lookup_listener(net, &tcp_hashinfo,
- saddr, sport,
- daddr, dport,
- in->ifindex);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
-
- break;
- case NFT_LOOKUP_ESTABLISHED:
- sk = inet_lookup_established(net, &tcp_hashinfo,
- saddr, sport, daddr, dport,
- in->ifindex);
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- break;
- }
- break;
- case IPPROTO_UDP:
- sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
- in->ifindex);
- if (sk && lookup_type != NFT_LOOKUP_ANY) {
- int connected = (sk->sk_state == TCP_ESTABLISHED);
- int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
- if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
- (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
- sock_put(sk);
- sk = NULL;
- }
- }
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- }
-
- pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n",
- protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);
-
- return sk;
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static inline struct sock *
-nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
- const struct in6_addr *saddr, const struct in6_addr *daddr,
- const __be16 sport, const __be16 dport,
- const struct net_device *in, int lookup_type)
-{
- struct sock *sk;
-
- /* look up socket */
- switch (protocol) {
- case IPPROTO_TCP:
- switch (lookup_type) {
- case NFT_LOOKUP_ANY:
- sk = inet6_lookup(net, &tcp_hashinfo,
- saddr, sport, daddr, dport,
- in->ifindex);
- break;
- case NFT_LOOKUP_LISTENER:
- sk = inet6_lookup_listener(net, &tcp_hashinfo,
- saddr, sport,
- daddr, ntohs(dport),
- in->ifindex);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
-
- break;
- case NFT_LOOKUP_ESTABLISHED:
- sk = __inet6_lookup_established(net, &tcp_hashinfo,
- saddr, sport, daddr, ntohs(dport),
- in->ifindex);
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- break;
- }
- break;
- case IPPROTO_UDP:
- sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
- in->ifindex);
- if (sk && lookup_type != NFT_LOOKUP_ANY) {
- int connected = (sk->sk_state == TCP_ESTABLISHED);
- int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
- if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
- (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
- sock_put(sk);
- sk = NULL;
- }
- }
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- }
-
- pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n",
- protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk);
-
- return sk;
-}
-#endif
-
-/* assign a socket to the skb -- consumes sk */
-void
-nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk);
-
-#endif
enum ip_conntrack_info ctinfo);
void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, int diff);
+int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+ u32 portid, u32 report);
#else
inline struct nf_conn *
nfqnl_ct_get(struct sk_buff *entskb, size_t *size, enum ip_conntrack_info *ctinfo)
enum ip_conntrack_info ctinfo, int diff)
{
}
+
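+/* Stub for builds without conntrack support: report success without
+ * attaching anything.
+ */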
+inline int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+ u32 portid, u32 report)
+{
+ return 0;
+}
#endif /* NF_CONNTRACK */
#endif
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
u32 xfrm_get_acqseq(void);
extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
-struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark,
+struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
u8 mode, u32 reqid, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create,
#define TUNGETVNETHDRSZ _IOR('T', 215, int)
#define TUNSETVNETHDRSZ _IOW('T', 216, int)
#define TUNSETQUEUE _IOW('T', 217, int)
+#define TUNSETIFINDEX _IOW('T', 218, unsigned int)
+#define TUNGETFILTER _IOR('T', 219, struct sock_fprog)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
#define IFF_DETACH_QUEUE 0x0400
/* read-only flag */
#define IFF_PERSIST 0x0800
+#define IFF_NOFILTER 0x1000
/* Socket options */
#define TUN_TX_TIMESTAMP 1
__u8 reserved;
};
+/* index values for the variables in ipv4_devconf */
+enum
+{
+ IPV4_DEVCONF_FORWARDING=1,
+ IPV4_DEVCONF_MC_FORWARDING,
+ IPV4_DEVCONF_PROXY_ARP,
+ IPV4_DEVCONF_ACCEPT_REDIRECTS,
+ IPV4_DEVCONF_SECURE_REDIRECTS,
+ IPV4_DEVCONF_SEND_REDIRECTS,
+ IPV4_DEVCONF_SHARED_MEDIA,
+ IPV4_DEVCONF_RP_FILTER,
+ IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
+ IPV4_DEVCONF_BOOTP_RELAY,
+ IPV4_DEVCONF_LOG_MARTIANS,
+ IPV4_DEVCONF_TAG,
+ IPV4_DEVCONF_ARPFILTER,
+ IPV4_DEVCONF_MEDIUM_ID,
+ IPV4_DEVCONF_NOXFRM,
+ IPV4_DEVCONF_NOPOLICY,
+ IPV4_DEVCONF_FORCE_IGMP_VERSION,
+ IPV4_DEVCONF_ARP_ANNOUNCE,
+ IPV4_DEVCONF_ARP_IGNORE,
+ IPV4_DEVCONF_PROMOTE_SECONDARIES,
+ IPV4_DEVCONF_ARP_ACCEPT,
+ IPV4_DEVCONF_ARP_NOTIFY,
+ IPV4_DEVCONF_ACCEPT_LOCAL,
+ IPV4_DEVCONF_SRC_VMARK,
+ IPV4_DEVCONF_PROXY_ARP_PVLAN,
+ IPV4_DEVCONF_ROUTE_LOCALNET,
+ IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL,
+ IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL,
+ __IPV4_DEVCONF_MAX
+};
+
+#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
+
#endif /* _UAPI_LINUX_IP_H */
header-y += xt_CONNSECMARK.h
header-y += xt_CT.h
header-y += xt_DSCP.h
+header-y += xt_HMARK.h
header-y += xt_IDLETIMER.h
header-y += xt_LED.h
header-y += xt_LOG.h
header-y += xt_rateest.h
header-y += xt_realm.h
header-y += xt_recent.h
+header-y += xt_rpfilter.h
header-y += xt_sctp.h
header-y += xt_set.h
header-y += xt_socket.h
NFQA_CT_INFO, /* enum ip_conntrack_info */
NFQA_CAP_LEN, /* __u32 length of captured packet */
NFQA_SKB_INFO, /* __u32 skb meta information */
+ NFQA_EXP, /* nf_conntrack_netlink.h */
__NFQA_MAX
};
Memory Resource Controller Swap Extension comes with its price in
a bigger memory consumption. General purpose distribution kernels
which want to enable the feature but keep it disabled by default
- and let the user enable it by swapaccount boot command line
+ and let the user enable it with the swapaccount=1 boot command line
parameter should have this option unselected.
Those who want the feature enabled by default should
select this option (if, for some reason, they need to disable it
{
struct cpuset *cs = cgroup_cs(cgrp);
cpuset_filetype_t type = cft->private;
- int retval = -ENODEV;
+ int retval = 0;
mutex_lock(&cpuset_mutex);
- if (!is_cpuset_online(cs))
+ if (!is_cpuset_online(cs)) {
+ retval = -ENODEV;
goto out_unlock;
+ }
switch (type) {
case FILE_CPU_EXCLUSIVE:
BUG_ON(bits > 32);
WARN_ON(!irqs_disabled());
read_sched_clock = read;
- sched_clock_mask = (1 << bits) - 1;
+ sched_clock_mask = (1ULL << bits) - 1;
cd.rate = rate;
/* calculate the mult/shift to convert counter ticks to ns. */
* Don't allow the user to think they can get
* full NO_HZ with this machine.
*/
- WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
+ WARN_ONCE(have_nohz_full_mask,
+ "NO_HZ FULL will not work with unstable sched clock");
return false;
}
#endif
void __init tick_nohz_init(void)
{
- int cpu;
-
if (!have_nohz_full_mask) {
if (tick_nohz_init_all() < 0)
return;
/**
* wake_up_atomic_t - Wake up a waiter on an atomic_t
- * @word: The word being waited on, a kernel virtual address
- * @bit: The bit of the word being waited on
+ * @p: The atomic_t being waited on, a kernel virtual address
*
* Wake up anyone waiting for the atomic_t to go to zero.
*
exit:
return ret;
}
-EXPORT_SYMBOL_GPL(lz4_compress);
+EXPORT_SYMBOL(lz4_compress);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 compressor");
return ret;
}
#ifndef STATIC
-EXPORT_SYMBOL_GPL(lz4_decompress);
+EXPORT_SYMBOL(lz4_decompress);
#endif
int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
return ret;
}
#ifndef STATIC
-EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize);
+EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 Decompressor");
#endif
exit:
return ret;
}
-EXPORT_SYMBOL_GPL(lz4hc_compress);
+EXPORT_SYMBOL(lz4hc_compress);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4HC compressor");
#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
- /* consider enabled if no parameter or 1 is given */
if (!strcmp(s, "1"))
really_do_swap_account = 1;
else if (!strcmp(s, "0"))
switch (packet_type) {
case BATADV_UNICAST:
- batadv_unicast_prepare_skb(skb, orig_node);
+ if (!batadv_unicast_prepare_skb(skb, orig_node))
+ goto out;
+
header_len = sizeof(struct batadv_unicast_packet);
break;
case BATADV_UNICAST_4ADDR:
- batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
- packet_subtype);
+ if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
+ packet_subtype))
+ goto out;
+
header_len = sizeof(struct batadv_unicast_4addr_packet);
break;
default:
if (!pv)
return;
- for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
f = __br_fdb_get(br, br->dev->dev_addr, vid);
if (f && f->is_local && !f->dst)
fdb_delete(br, f);
/* VID was specified, so use it. */
err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
} else {
- if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+ if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
goto out;
}
* specify a VLAN. To be nice, add/update entry for every
* vlan on this port.
*/
- for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
if (err)
goto out;
err = __br_fdb_delete(p, addr, vid);
} else {
- if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+ if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
err = __br_fdb_delete(p, addr, 0);
goto out;
}
* vlan on this port.
*/
err = -ENOENT;
- for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
err &= __br_fdb_delete(p, addr, vid);
}
}
else
pv = br_get_vlan_info(br);
- if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
+ if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
goto done;
af = nla_nest_start(skb, IFLA_AF_SPEC);
goto nla_put_failure;
pvid = br_get_pvid(pv);
- for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
vinfo.vid = vid;
vinfo.flags = 0;
if (vid == pvid)
clear_bit(vid, v->vlan_bitmap);
v->num_vlans--;
- if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+ if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
if (v->port_idx)
rcu_assign_pointer(v->parent.port->vlan_info, NULL);
else
{
smp_wmb();
v->pvid = 0;
- bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
+ bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
if (v->port_idx)
rcu_assign_pointer(v->parent.port->vlan_info, NULL);
else
static LIST_HEAD(lowpan_devices);
-/*
- * Uncompression of linklocal:
- * 0 -> 16 bytes from packet
- * 1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet
- * 2 -> 2 bytes from prefix - zeroes + 2 from packet
- * 3 -> 2 bytes from prefix - infer 8 bytes from lladdr
- *
- * NOTE: => the uncompress function does change 0xf to 0x10
- * NOTE: 0x00 => no-autoconfig => unspecified
- */
-static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20};
-
-/*
- * Uncompression of ctx-based:
- * 0 -> 0 bits from packet [unspecified / reserved]
- * 1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet
- * 2 -> 8 bytes from prefix - zeroes + 2 from packet
- * 3 -> 8 bytes from prefix - infer 8 bytes from lladdr
- */
-static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80};
-
-/*
- * Uncompression of ctx-base
- * 0 -> 0 bits from packet
- * 1 -> 2 bytes from prefix - bunch of zeroes 5 from packet
- * 2 -> 2 bytes from prefix - zeroes + 3 from packet
- * 3 -> 2 bytes from prefix - infer 1 bytes from lladdr
- */
-static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21};
-
-/* Link local prefix */
-static const u8 lowpan_llprefix[] = {0xfe, 0x80};
-
/* private device info */
struct lowpan_dev_info {
struct net_device *real_dev; /* real WPAN device ptr */
return rol8(val, shift);
}
-static void
-lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
+/*
+ * Uncompress address function for source and
+ * destination addresses (non-multicast).
+ *
+ * address_mode is the sam or dam value.
+ */
+static int
+lowpan_uncompress_addr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 address_mode,
+ const struct ieee802154_addr *lladdr)
{
- memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN);
- /* second bit-flip (Universe/Local) is done according RFC2464 */
- ipaddr->s6_addr[8] ^= 0x02;
+ bool fail;
+
+ switch (address_mode) {
+ case LOWPAN_IPHC_ADDR_00:
+ /* for global link addresses */
+ fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
+ break;
+ case LOWPAN_IPHC_ADDR_01:
+ /* fe:80::XXXX:XXXX:XXXX:XXXX */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
+ break;
+ case LOWPAN_IPHC_ADDR_02:
+ /* fe:80::ff:fe00:XXXX */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ ipaddr->s6_addr[11] = 0xFF;
+ ipaddr->s6_addr[12] = 0xFE;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
+ break;
+ case LOWPAN_IPHC_ADDR_03:
+ fail = false;
+ switch (lladdr->addr_type) {
+ case IEEE802154_ADDR_LONG:
+ /* fe:80::XXXX:XXXX:XXXX:XXXX
+ * \_________________/
+ * hwaddr
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ memcpy(&ipaddr->s6_addr[8], lladdr->hwaddr,
+ IEEE802154_ADDR_LEN);
+ /* second bit-flip (Universe/Local)
+ * is done according to RFC 2464
+ */
+ ipaddr->s6_addr[8] ^= 0x02;
+ break;
+ case IEEE802154_ADDR_SHORT:
+ /* fe:80::ff:fe00:XXXX
+ * \__/
+ * short_addr
+ *
+ * Universe/Local bit is zero.
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ ipaddr->s6_addr[11] = 0xFF;
+ ipaddr->s6_addr[12] = 0xFE;
+ ipaddr->s6_addr16[7] = htons(lladdr->short_addr);
+ break;
+ default:
+ pr_debug("Invalid addr_type set\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_debug("Invalid address mode value: 0x%x\n", address_mode);
+ return -EINVAL;
+ }
+
+ if (fail) {
+ pr_debug("Failed to fetch skb data\n");
+ return -EIO;
+ }
+
+ lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 addr is:\n",
+ ipaddr->s6_addr, 16);
+
+ return 0;
}
-/*
- * Uncompress addresses based on a prefix and a postfix with zeroes in
- * between. If the postfix is zero in length it will use the link address
- * to configure the IP address (autoconf style).
- * pref_post_count takes a byte where the first nibble specify prefix count
- * and the second postfix count (NOTE: 15/0xf => 16 bytes copy).
+/* Uncompress address function for a source context-based
+ * address (non-multicast).
*/
static int
-lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
- u8 const *prefix, u8 pref_post_count, unsigned char *lladdr)
+lowpan_uncompress_context_based_src_addr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 sam)
{
- u8 prefcount = pref_post_count >> 4;
- u8 postcount = pref_post_count & 0x0f;
-
- /* full nibble 15 => 16 */
- prefcount = (prefcount == 15 ? 16 : prefcount);
- postcount = (postcount == 15 ? 16 : postcount);
-
- if (lladdr)
- lowpan_raw_dump_inline(__func__, "linklocal address",
- lladdr, IEEE802154_ADDR_LEN);
- if (prefcount > 0)
- memcpy(ipaddr, prefix, prefcount);
-
- if (prefcount + postcount < 16)
- memset(&ipaddr->s6_addr[prefcount], 0,
- 16 - (prefcount + postcount));
-
- if (postcount > 0) {
- memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount);
- skb_pull(skb, postcount);
- } else if (prefcount > 0) {
- if (lladdr == NULL)
- return -EINVAL;
+ switch (sam) {
+ case LOWPAN_IPHC_ADDR_00:
+ /* unspec address ::
+ * Do nothing, address is already ::
+ */
+ break;
+ case LOWPAN_IPHC_ADDR_01:
+ /* TODO */
+ case LOWPAN_IPHC_ADDR_02:
+ /* TODO */
+ case LOWPAN_IPHC_ADDR_03:
+ /* TODO */
+ netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam);
+ return -EINVAL;
+ default:
+ pr_debug("Invalid sam value: 0x%x\n", sam);
+ return -EINVAL;
+ }
+
+ lowpan_raw_dump_inline(NULL,
+ "Reconstructed context based ipv6 src addr is:\n",
+ ipaddr->s6_addr, 16);
+
+ return 0;
+}
- /* no IID based configuration if no prefix and no data */
- lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
+/* Uncompress function for the multicast destination address,
+ * used when the M bit is set.
+ */
+static int
+lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 dam)
+{
+ bool fail;
+
+ switch (dam) {
+ case LOWPAN_IPHC_DAM_00:
+ /* 00: 128 bits. The full address
+ * is carried in-line.
+ */
+ fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
+ break;
+ case LOWPAN_IPHC_DAM_01:
+ /* 01: 48 bits. The address takes
+ * the form ffXX::00XX:XXXX:XXXX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
+ fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5);
+ break;
+ case LOWPAN_IPHC_DAM_10:
+ /* 10: 32 bits. The address takes
+ * the form ffXX::00XX:XXXX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
+ fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3);
+ break;
+ case LOWPAN_IPHC_DAM_11:
+ /* 11: 8 bits. The address takes
+ * the form ff02::00XX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ ipaddr->s6_addr[1] = 0x02;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
+ break;
+ default:
+ pr_debug("DAM value has a wrong value: 0x%x\n", dam);
+ return -EINVAL;
+ }
+
+ if (fail) {
+ pr_debug("Failed to fetch skb data\n");
+ return -EIO;
}
- pr_debug("uncompressing %d + %d => ", prefcount, postcount);
- lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);
+ lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is:\n",
+ ipaddr->s6_addr, 16);
return 0;
}
skb_reserve(frame->skb, sizeof(struct ipv6hdr));
skb_put(frame->skb, frame->length);
+ /* copy the first control block to keep a
+ * trace of the link-layer addresses in case
+ * of a link-local compressed address
+ */
+ memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));
+
init_timer(&frame->timer);
/* time out is the same as for ipv6 - 60 sec */
frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
static int
lowpan_process_data(struct sk_buff *skb)
{
- struct ipv6hdr hdr;
+ struct ipv6hdr hdr = {};
u8 tmp, iphc0, iphc1, num_context = 0;
- u8 *_saddr, *_daddr;
+ const struct ieee802154_addr *_saddr, *_daddr;
int err;
lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
if (lowpan_fetch_skb_u8(skb, &iphc1))
goto drop;
- _saddr = mac_cb(skb)->sa.hwaddr;
- _daddr = mac_cb(skb)->da.hwaddr;
+ _saddr = &mac_cb(skb)->sa;
+ _daddr = &mac_cb(skb)->da;
pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1);
hdr.priority = ((tmp >> 2) & 0x0f);
hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
- hdr.flow_lbl[1] = 0;
- hdr.flow_lbl[2] = 0;
break;
/*
* Flow Label carried in-line
break;
/* Traffic Class and Flow Label are elided */
case 3: /* 11b */
- hdr.priority = 0;
- hdr.flow_lbl[0] = 0;
- hdr.flow_lbl[1] = 0;
- hdr.flow_lbl[2] = 0;
break;
default:
break;
/* Extract SAM to the tmp variable */
tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
- /* Source address uncompression */
- pr_debug("source address stateless compression\n");
- err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
- lowpan_unc_llconf[tmp], skb->data);
+ if (iphc1 & LOWPAN_IPHC_SAC) {
+ /* Source address context based uncompression */
+ pr_debug("SAC bit is set. Handle context based source address.\n");
+ err = lowpan_uncompress_context_based_src_addr(
+ skb, &hdr.saddr, tmp);
+ } else {
+ /* Source address uncompression */
+ pr_debug("source address stateless compression\n");
+ err = lowpan_uncompress_addr(skb, &hdr.saddr, tmp, _saddr);
+ }
+
+ /* Check for an error from either branch above */
if (err)
goto drop;
pr_debug("dest: context-based mcast compression\n");
/* TODO: implement this */
} else {
- u8 prefix[] = {0xff, 0x02};
-
- pr_debug("dest: non context-based mcast compression\n");
- if (0 < tmp && tmp < 3) {
- if (lowpan_fetch_skb_u8(skb, &prefix[1]))
- goto drop;
- }
-
- err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
- lowpan_unc_mxconf[tmp], NULL);
+ err = lowpan_uncompress_multicast_daddr(
+ skb, &hdr.daddr, tmp);
if (err)
goto drop;
}
} else {
pr_debug("dest: stateless compression\n");
- err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
- lowpan_unc_llconf[tmp], skb->data);
+ err = lowpan_uncompress_addr(skb, &hdr.daddr, tmp, _daddr);
if (err)
goto drop;
}
/* Values of fields within the IPHC encoding second byte */
#define LOWPAN_IPHC_CID 0x80
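+/* Two-bit SAM/DAM address compression mode values (as used after shifting) */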
+#define LOWPAN_IPHC_ADDR_00 0x00
+#define LOWPAN_IPHC_ADDR_01 0x01
+#define LOWPAN_IPHC_ADDR_02 0x02
+#define LOWPAN_IPHC_ADDR_03 0x03
+
#define LOWPAN_IPHC_SAC 0x40
-#define LOWPAN_IPHC_SAM_00 0x00
-#define LOWPAN_IPHC_SAM_01 0x10
-#define LOWPAN_IPHC_SAM_10 0x20
#define LOWPAN_IPHC_SAM 0x30
#define LOWPAN_IPHC_SAM_BIT 4
dest = 16 bit inline */
#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
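+/* Pull len bytes from the start of the skb into data and advance the
+ * skb; returns true on failure (not enough data to pull), which lets
+ * callers accumulate several fetches with |=.
+ */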
+static inline bool lowpan_fetch_skb(struct sk_buff *skb,
+ void *data, const unsigned int len)
+{
+ if (unlikely(!pskb_may_pull(skb, len)))
+ return true;
+
+ skb_copy_from_linear_data(skb, data, len);
+ skb_pull(skb, len);
+
+ return false;
+}
+
#endif /* __6LOWPAN_H__ */
/* FB netdevice is special: we have one, and only one per netns.
* Allowing to move it to another netns is clearly unsafe.
*/
- itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+ if (!IS_ERR(itn->fb_tunnel_dev))
+ itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
rtnl_unlock();
- if (IS_ERR(itn->fb_tunnel_dev))
- return PTR_ERR(itn->fb_tunnel_dev);
-
- return 0;
+ return PTR_RET(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
NF_CT_ASSERT(dev->ifindex != 0);
nf_ct_iterate_cleanup(net, device_cmp,
- (void *)(long)dev->ifindex);
+ (void *)(long)dev->ifindex, 0, 0);
}
return NOTIFY_DONE;
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
-#define IP_MAX_MTU 0xFFF0
+/* IPv4 datagram length is stored into 16bit field (tot_len) */
+#define IP_MAX_MTU 0xFFFF
#define RT_GC_TIMEOUT (300*HZ)
mtu = 576;
}
- if (mtu > IP_MAX_MTU)
- mtu = IP_MAX_MTU;
-
- return mtu;
+ return min_t(unsigned int, mtu, IP_MAX_MTU);
}
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
if (!skb)
goto wait_for_memory;
+ /*
+ * All packets are restored as if they have
+ * already been sent.
+ */
+ if (tp->repair)
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
+
/*
* Check whether we can use HW checksum.
*/
if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
tcp_try_keep_open(sk);
- if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
- tcp_moderate_cwnd(tp);
} else {
tcp_cwnd_reduction(sk, prior_unsacked, 0);
}
inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}
+/* Decide whether to run the increase function of congestion control. */
static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
- const struct tcp_sock *tp = tcp_sk(sk);
- return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
- !tcp_in_cwnd_reduction(sk);
+ if (tcp_in_cwnd_reduction(sk))
+ return false;
+
+ /* If reordering is high then always grow cwnd whenever data is
+ * delivered regardless of its ordering. Otherwise stay conservative
+ * and only grow cwnd on in-order delivery in Open state, and retain
+ * cwnd in Disordered state (RFC5681). A stretched ACK with
+ * new SACK or ECE mark may first advance cwnd here and later reduce
+ * cwnd in tcp_fastretrans_alert() based on more states.
+ */
+ if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
+ return flag & FLAG_FORWARD_PROGRESS;
+
+ return inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
+ flag & FLAG_DATA_ACKED;
}
/* Check that window update is acceptable.
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, sack_rtt);
acked -= tp->packets_out;
+ /* Advance cwnd if state allows */
+ if (tcp_may_raise_cwnd(sk, flag))
+ tcp_cong_avoid(sk, ack, prior_in_flight);
+
if (tcp_ack_is_dubious(sk, flag)) {
- /* Advance CWND, if state allows this. */
- if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
- tcp_cong_avoid(sk, ack, prior_in_flight);
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
tcp_fastretrans_alert(sk, acked, prior_unsacked,
is_dupack, flag);
- } else {
- if (flag & FLAG_DATA_ACKED)
- tcp_cong_avoid(sk, ack, prior_in_flight);
}
-
if (tp->tlp_high_seq)
tcp_process_tlp_ack(sk, ack, flag);
*/
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
- u16 queue_mapping,
- bool nocache)
+ u16 queue_mapping)
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct flowi4 fl4;
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
{
- int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
+ int res = tcp_v4_send_synack(sk, NULL, req, 0);
if (!res)
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
struct tcp_log {
ktime_t tstamp;
- __be32 saddr, daddr;
- __be16 sport, dport;
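+ /* sockaddr union so a single log entry can hold IPv4 or IPv6 endpoints */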
+ union {
+ struct sockaddr raw;
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } src, dst;
u16 length;
u32 snd_nxt;
u32 snd_una;
u32 snd_wnd;
+ u32 rcv_wnd;
u32 snd_cwnd;
u32 ssthresh;
u32 srtt;
return bufsize - tcp_probe_used() - 1;
}
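+/* Fill a struct sockaddr_in from the inet socket; "mem" is the "s" or
+ * "d" prefix pasted into the inet_##mem##port/inet_##mem##addr member
+ * names.
+ */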
+#define tcp_probe_copy_fl_to_si4(inet, si4, mem) \
+ do { \
+ si4.sin_family = AF_INET; \
+ si4.sin_port = inet->inet_##mem##port; \
+ si4.sin_addr.s_addr = inet->inet_##mem##addr; \
+ } while (0) \
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define tcp_probe_copy_fl_to_si6(inet, si6, mem) \
+ do { \
+ struct ipv6_pinfo *pi6 = inet->pinet6; \
+ si6.sin6_family = AF_INET6; \
+ si6.sin6_port = inet->inet_##mem##port; \
+ si6.sin6_addr = pi6->mem##addr; \
+ si6.sin6_flowinfo = 0; /* No need here. */ \
+ si6.sin6_scope_id = 0; /* No need here. */ \
+ } while (0)
+#else
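+/* IPv6 compiled out: just zero the sockaddr in the log entry */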
+#define tcp_probe_copy_fl_to_si6(fl, si6, mem) \
+ do { \
+ memset(&si6, 0, sizeof(si6)); \
+ } while (0)
+#endif
+
/*
* Hook inserted to be called before each receive packet.
* Note: arguments must match tcp_rcv_established()!
*/
static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned int len)
+ const struct tcphdr *th, unsigned int len)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_sock *inet = inet_sk(sk);
struct tcp_log *p = tcp_probe.log + tcp_probe.head;
p->tstamp = ktime_get();
- p->saddr = inet->inet_saddr;
- p->sport = inet->inet_sport;
- p->daddr = inet->inet_daddr;
- p->dport = inet->inet_dport;
+ switch (sk->sk_family) {
+ case AF_INET:
+ tcp_probe_copy_fl_to_si4(inet, p->src.v4, s);
+ tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
+ break;
+ case AF_INET6:
+ tcp_probe_copy_fl_to_si6(inet, p->src.v6, s);
+ tcp_probe_copy_fl_to_si6(inet, p->dst.v6, d);
+ break;
+ default:
+ BUG();
+ }
+
p->length = skb->len;
p->snd_nxt = tp->snd_nxt;
p->snd_una = tp->snd_una;
p->snd_cwnd = tp->snd_cwnd;
p->snd_wnd = tp->snd_wnd;
+ p->rcv_wnd = tp->rcv_wnd;
p->ssthresh = tcp_current_ssthresh(sk);
p->srtt = tp->srtt >> 3;
= ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
return scnprintf(tbuf, n,
- "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n",
+ "%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n",
(unsigned long) tv.tv_sec,
(unsigned long) tv.tv_nsec,
- &p->saddr, ntohs(p->sport),
- &p->daddr, ntohs(p->dport),
- p->length, p->snd_nxt, p->snd_una,
- p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt);
+ &p->src, &p->dst, p->length, p->snd_nxt, p->snd_una,
+ p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd);
}
static ssize_t tcpprobe_read(struct file *file, char __user *buf,
{
int ret = -ENOMEM;
+ /* Warning: if the function signature of tcp_rcv_established
+ * has been changed, you also have to change the signature of
+ * jtcp_rcv_established, otherwise you end up right here!
+ */
+ BUILD_BUG_ON(__same_type(tcp_rcv_established,
+ jtcp_rcv_established) == 0);
+
init_waitqueue_head(&tcp_probe.wait);
spin_lock_init(&tcp_probe.lock);
#define ACONF_DEBUG 2
#if ACONF_DEBUG >= 3
-#define ADBG(x) printk x
+#define ADBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
#else
-#define ADBG(x)
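+/* The dead if (0) branch keeps the format string and arguments
+ * type-checked by the compiler even when debugging is disabled, while
+ * generating no code.
+ */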
+#define ADBG(fmt, ...) do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
#endif
#define INFINITY_LIFE_TIME 0xFFFFFFFF
dev_hold(dev);
if (snmp6_alloc_dev(ndev) < 0) {
- ADBG((KERN_WARNING
+ ADBG(KERN_WARNING
"%s: cannot allocate memory for statistics; dev=%s.\n",
- __func__, dev->name));
+ __func__, dev->name);
neigh_parms_release(&nd_tbl, ndev->nd_parms);
dev_put(dev);
kfree(ndev);
}
if (snmp6_register_dev(ndev) < 0) {
- ADBG((KERN_WARNING
+ ADBG(KERN_WARNING
"%s: cannot create /proc/net/dev_snmp6/%s\n",
- __func__, dev->name));
+ __func__, dev->name);
neigh_parms_release(&nd_tbl, ndev->nd_parms);
ndev->dead = 1;
in6_dev_finish_destroy(ndev);
/* Ignore adding duplicate addresses on an interface */
if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
- ADBG(("ipv6_add_addr: already assigned\n"));
+ ADBG("ipv6_add_addr: already assigned\n");
err = -EEXIST;
goto out;
}
ifa = kzalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC);
if (ifa == NULL) {
- ADBG(("ipv6_add_addr: malloc failed\n"));
+ ADBG("ipv6_add_addr: malloc failed\n");
err = -ENOBUFS;
goto out;
}
if (ifp->flags & IFA_F_OPTIMISTIC)
addr_flags |= IFA_F_OPTIMISTIC;
- ift = !max_addresses ||
- ipv6_count_addresses(idev) < max_addresses ?
- ipv6_add_addr(idev, &addr, NULL, tmp_plen,
- ipv6_addr_scope(&addr), addr_flags,
- tmp_valid_lft, tmp_prefered_lft) : NULL;
- if (IS_ERR_OR_NULL(ift)) {
+ ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
+ ipv6_addr_scope(&addr), addr_flags,
+ tmp_valid_lft, tmp_prefered_lft);
+ if (IS_ERR(ift)) {
in6_ifa_put(ifp);
in6_dev_put(idev);
pr_info("%s: retry temporary address regeneration\n", __func__);
return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
}
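+/* Derive an EUI-64 interface identifier from the tunnel device's
+ * permanent (random) MAC address, inverting the universal/local bit.
+ */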
+static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
+{
+ memcpy(eui, dev->perm_addr, 3);
+ memcpy(eui + 5, dev->perm_addr + 3, 3);
+ eui[3] = 0xFF;
+ eui[4] = 0xFE;
+ eui[0] ^= 2;
+ return 0;
+}
+
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
{
switch (dev->type) {
return addrconf_ifid_eui64(eui, dev);
case ARPHRD_IEEE1394:
return addrconf_ifid_ieee1394(eui, dev);
+ case ARPHRD_TUNNEL6:
+ return addrconf_ifid_ip6tnl(eui, dev);
}
return -1;
}
pinfo = (struct prefix_info *) opt;
if (len < sizeof(struct prefix_info)) {
- ADBG(("addrconf: prefix option too short\n"));
+ ADBG("addrconf: prefix option too short\n");
return;
}
(dev->type != ARPHRD_ARCNET) &&
(dev->type != ARPHRD_INFINIBAND) &&
(dev->type != ARPHRD_IEEE802154) &&
- (dev->type != ARPHRD_IEEE1394)) {
+ (dev->type != ARPHRD_IEEE1394) &&
+ (dev->type != ARPHRD_TUNNEL6)) {
/* Alas, we support only Ethernet autoconfiguration. */
return;
}
return -1;
}
-static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
-{
- struct net_device *link_dev;
- struct net *net = dev_net(idev->dev);
-
- /* first try to inherit the link-local address from the link device */
- if (idev->dev->iflink &&
- (link_dev = __dev_get_by_index(net, idev->dev->iflink))) {
- if (!ipv6_inherit_linklocal(idev, link_dev))
- return;
- }
- /* then try to inherit it from any device */
- for_each_netdev(net, link_dev) {
- if (!ipv6_inherit_linklocal(idev, link_dev))
- return;
- }
- pr_debug("init ip6-ip6: add_linklocal failed\n");
-}
-
-/*
- * Autoconfigure tunnel with a link-local address so routing protocols,
- * DHCPv6, MLD etc. can be run over the virtual link
- */
-
-static void addrconf_ip6_tnl_config(struct net_device *dev)
-{
- struct inet6_dev *idev;
-
- ASSERT_RTNL();
-
- idev = addrconf_add_dev(dev);
- if (IS_ERR(idev)) {
- pr_debug("init ip6-ip6: add_dev failed\n");
- return;
- }
- ip6_tnl_add_linklocal(idev);
-}
-
static int addrconf_notify(struct notifier_block *this, unsigned long event,
void *ptr)
{
addrconf_gre_config(dev);
break;
#endif
- case ARPHRD_TUNNEL6:
- addrconf_ip6_tnl_config(dev);
- break;
case ARPHRD_LOOPBACK:
init_loopback(dev);
break;
if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
- ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
- now, next, next_sec, next_sched));
+ ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
+ now, next, next_sec, next_sched);
addr_chk_timer.expires = next_sched;
add_timer(&addr_chk_timer);
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
+#include <linux/etherdevice.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ /* This perm addr will be used as the interface identifier by IPv6 */
+ dev->addr_assign_type = NET_ADDR_RANDOM;
+ eth_random_addr(dev->perm_addr);
}
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
struct inet6_dev *idev);
-
#define MLD_QRV_DEFAULT 2
+/* RFC3810, 8.1 Query Version Distinctions */
+#define MLD_V1_QUERY_LEN 24
+#define MLD_V2_QUERY_LEN_MIN 28
+
#define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \
(idev)->cnf.force_mld_version == 1 || \
((idev)->mc_v1_seen && \
static void mld_gq_start_timer(struct inet6_dev *idev)
{
- int tv = net_random() % idev->mc_maxdelay;
+ unsigned long tv = net_random() % idev->mc_maxdelay;
idev->mc_gq_running = 1;
if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
in6_dev_hold(idev);
}
-static void mld_ifc_start_timer(struct inet6_dev *idev, int delay)
+static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
{
- int tv = net_random() % delay;
+ unsigned long tv = net_random() % delay;
if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
in6_dev_hold(idev);
}
-static void mld_dad_start_timer(struct inet6_dev *idev, int delay)
+static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
{
- int tv = net_random() % delay;
+ unsigned long tv = net_random() % delay;
if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
in6_dev_hold(idev);
!(group_type&IPV6_ADDR_MULTICAST))
return -EINVAL;
- if (len == 24) {
+ if (len == MLD_V1_QUERY_LEN) {
int switchback;
/* MLDv1 router present */
- /* Translate milliseconds to jiffies */
- max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000;
-
+ max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
switchback = (idev->mc_qrv + 1) * max_delay;
idev->mc_v1_seen = jiffies + switchback;
__in6_dev_put(idev);
/* clear deleted report items */
mld_clear_delrec(idev);
- } else if (len >= 28) {
+ } else if (len >= MLD_V2_QUERY_LEN_MIN) {
int srcs_offset = sizeof(struct mld2_query) -
sizeof(struct icmp6hdr);
if (!pskb_may_pull(skb, srcs_offset))
return -EINVAL;
mlh2 = (struct mld2_query *)skb_transport_header(skb);
- max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
- if (!max_delay)
- max_delay = 1;
+
+ max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mlh2->mld2q_mrc))), 1UL);
+
idev->mc_maxdelay = max_delay;
+
if (mlh2->mld2q_qrv)
idev->mc_qrv = mlh2->mld2q_qrv;
if (group_type == IPV6_ADDR_ANY) { /* general query */
if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
return;
- if (!ndopts.nd_opts_rh)
+ if (!ndopts.nd_opts_rh) {
+ ip6_redirect_no_header(skb, dev_net(skb->dev), 0, 0);
return;
+ }
hdr = (u8 *)ndopts.nd_opts_rh;
hdr += 8;
if (event == NETDEV_DOWN)
nf_ct_iterate_cleanup(net, device_cmp,
- (void *)(long)dev->ifindex);
+ (void *)(long)dev->ifindex, 0, 0);
return NOTIFY_DONE;
}
ipv6_hdr(head)->payload_len = htons(payload_len);
ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
IP6CB(head)->nhoff = nhoff;
+ IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
/* Yes, and fold redundant checksum back. 8) */
if (head->ip_summed == CHECKSUM_COMPLETE)
struct net *net = dev_net(skb_dst(skb)->dev);
int evicted;
+ if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
+ goto fail_hdr;
+
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
/* Jumbo payload inhibits frag. header */
ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
+ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
return 1;
}
}
EXPORT_SYMBOL_GPL(ip6_redirect);
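+/* Variant of ip6_redirect() for redirect messages carrying no
+ * redirected-header option: rebuild the flow from the ICMPv6 redirect
+ * message and the packet's IPv6 header instead.
+ */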
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+ u32 mark)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
+ fl6.flowi6_mark = mark;
+ fl6.flowi6_flags = 0;
+ fl6.daddr = msg->dest;
+ fl6.saddr = iph->daddr;
+
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (!dst->error)
+ rt6_do_redirect(dst, NULL, skb);
+ dst_release(dst);
+}
+
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
static DEFINE_MUTEX(pfkey_mutex);
#define DUMMY_MARK 0
-static struct xfrm_mark dummy_mark = {0, 0};
+static const struct xfrm_mark dummy_mark = {0, 0};
struct pfkey_sock {
/* struct sock must be the first member of struct pfkey_sock */
struct sock sk;
return 0;
}
-static u8 sadb_ext_min_len[] = {
+static const u8 sadb_ext_min_len[] = {
[SADB_EXT_RESERVED] = (u8) 0,
[SADB_EXT_SA] = (u8) sizeof(struct sadb_sa),
[SADB_EXT_LIFETIME_CURRENT] = (u8) sizeof(struct sadb_lifetime),
x->props.family = pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
&x->props.saddr);
- if (!x->props.family) {
- err = -EAFNOSUPPORT;
- goto out;
- }
pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1],
&x->id.daddr);
sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr);
- if (!xp->family) {
- err = -EINVAL;
- goto out;
- }
xp->selector.family = xp->family;
xp->selector.prefixlen_s = sa->sadb_address_prefixlen;
xp->selector.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
typedef int (*pfkey_handler)(struct sock *sk, struct sk_buff *skb,
const struct sadb_msg *hdr, void * const *ext_hdrs);
-static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
+static const pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
[SADB_RESERVED] = pfkey_reserved,
[SADB_GETSPI] = pfkey_getspi,
[SADB_UPDATE] = pfkey_add,
endif # NF_CONNTRACK
-# transparent proxy support
-config NETFILTER_TPROXY
- tristate "Transparent proxying support"
- depends on IP_NF_MANGLE
- depends on NETFILTER_ADVANCED
- help
- This option enables transparent proxying support, that is,
- support for handling non-locally bound IPv4 TCP and UDP sockets.
- For it to work you will have to configure certain iptables rules
- and use policy routing. For more information on how to set it up
- see Documentation/networking/tproxy.txt.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config NETFILTER_XTABLES
tristate "Netfilter Xtables support (required for ip_tables)"
default m if NETFILTER_ADVANCED=n
this clone be rerouted to another nexthop.
config NETFILTER_XT_TARGET_TPROXY
- tristate '"TPROXY" target support'
- depends on NETFILTER_TPROXY
+ tristate '"TPROXY" target transparent proxying support'
depends on NETFILTER_XTABLES
depends on NETFILTER_ADVANCED
+ depends on IP_NF_MANGLE
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
help
REDIRECT. It can only be used in the mangle table and is useful
to redirect traffic to a transparent proxy. It does _not_ depend
on Netfilter connection tracking and NAT, unlike REDIRECT.
+ For it to work you will have to configure certain iptables rules
+ and use policy routing. For more information on how to set it up
+ see Documentation/networking/tproxy.txt.
To compile it as a module, choose M here. If unsure, say N.
config NETFILTER_XT_MATCH_SOCKET
tristate '"socket" match support'
- depends on NETFILTER_TPROXY
depends on NETFILTER_XTABLES
depends on NETFILTER_ADVANCED
depends on !NF_CONNTRACK || NF_CONNTRACK
obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
-# transparent proxy support
-obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
-
# generic X tables
obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
/* This does not belong here, but locally generated errors need it if connection
tracking is in use: without this, the connection may not be in the hash table, and hence
manufactured ICMP or RST packets will not be associated with it. */
-void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly;
+void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
+ __rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);
-void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
+void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
- void (*attach)(struct sk_buff *, struct sk_buff *);
+ void (*attach)(struct sk_buff *, const struct sk_buff *);
if (skb->nfct) {
rcu_read_lock();
spin_lock_bh(&svc->sched_lock);
tbl->dead = 1;
- for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
ip_vs_lblcr_free(en);
}
struct ip_vs_lblcr_entry *en;
struct hlist_node *next;
- for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
spin_lock(&svc->sched_lock);
if (goal > tbl->max_size/2)
goal = tbl->max_size/2;
- for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
spin_lock(&svc->sched_lock);
/*
* Initialize the hash buckets
*/
- for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
INIT_HLIST_HEAD(&tbl->bucket[i]);
}
tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
switch (iph->protocol) {
case IPPROTO_TCP:
th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
+ if (unlikely(th == NULL))
+ return 0;
port = th->source;
break;
case IPPROTO_UDP:
uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
+ if (unlikely(uh == NULL))
+ return 0;
port = uh->source;
break;
case IPPROTO_SCTP:
sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
+ if (unlikely(sh == NULL))
+ return 0;
port = sh->source;
break;
default:
nf_conntrack_free(ct);
}
-void nf_ct_delete_from_lists(struct nf_conn *ct)
+static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
&net->ct.dying);
spin_unlock_bh(&nf_conntrack_lock);
}
-EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
static void death_by_event(unsigned long ul_conntrack)
{
nf_ct_put(ct);
}
-void nf_ct_dying_timeout(struct nf_conn *ct)
+static void nf_ct_dying_timeout(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
(prandom_u32() % net->ct.sysctl_events_retry_timeout);
add_timer(&ecache->timeout);
}
-EXPORT_SYMBOL_GPL(nf_ct_dying_timeout);
-static void death_by_timeout(unsigned long ul_conntrack)
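+/* Unlink the conntrack and deliver the destroy event to portid.
+ * Returns false if event delivery failed and the entry was parked on
+ * the dying list for a later retry, true if it was released.
+ */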
+bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
- struct nf_conn *ct = (void *)ul_conntrack;
struct nf_conn_tstamp *tstamp;
tstamp = nf_conn_tstamp_find(ct);
if (tstamp && tstamp->stop == 0)
tstamp->stop = ktime_to_ns(ktime_get_real());
- if (!test_bit(IPS_DYING_BIT, &ct->status) &&
- unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
+ if (!nf_ct_is_dying(ct) &&
+ unlikely(nf_conntrack_event_report(IPCT_DESTROY, ct,
+ portid, report) < 0)) {
/* destroy event was not delivered */
nf_ct_delete_from_lists(ct);
nf_ct_dying_timeout(ct);
- return;
+ return false;
}
set_bit(IPS_DYING_BIT, &ct->status);
nf_ct_delete_from_lists(ct);
nf_ct_put(ct);
+ return true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_delete);
+
+static void death_by_timeout(unsigned long ul_conntrack)
+{
+ nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
}
/*
return dropped;
if (del_timer(&ct->timeout)) {
- death_by_timeout((unsigned long)ct);
- /* Check if we indeed killed this entry. Reliable event
- delivery may have inserted it into the dying list. */
- if (test_bit(IPS_DYING_BIT, &ct->status)) {
+ if (nf_ct_delete(ct, 0, 0)) {
dropped = 1;
NF_CT_STAT_INC_ATOMIC(net, early_drop);
}
#endif
/* Used by ipt_REJECT and ip6t_REJECT. */
-static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
+static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
void nf_ct_iterate_cleanup(struct net *net,
int (*iter)(struct nf_conn *i, void *data),
- void *data)
+ void *data, u32 portid, int report)
{
struct nf_conn *ct;
unsigned int bucket = 0;
while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
/* Time to push up daisies... */
if (del_timer(&ct->timeout))
- death_by_timeout((unsigned long)ct);
+ nf_ct_delete(ct, portid, report);
+
/* ... else the timer will get him soon. */
nf_ct_put(ct);
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
-struct __nf_ct_flush_report {
- u32 portid;
- int report;
-};
-
-static int kill_report(struct nf_conn *i, void *data)
-{
- struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
- struct nf_conn_tstamp *tstamp;
-
- tstamp = nf_conn_tstamp_find(i);
- if (tstamp && tstamp->stop == 0)
- tstamp->stop = ktime_to_ns(ktime_get_real());
-
- /* If we fail to deliver the event, death_by_timeout() will retry */
- if (nf_conntrack_event_report(IPCT_DESTROY, i,
- fr->portid, fr->report) < 0)
- return 1;
-
- /* Avoid the delivery of the destroy event in death_by_timeout(). */
- set_bit(IPS_DYING_BIT, &i->status);
- return 1;
-}
-
static int kill_all(struct nf_conn *i, void *data)
{
return 1;
void nf_conntrack_flush_report(struct net *net, u32 portid, int report)
{
- struct __nf_ct_flush_report fr = {
- .portid = portid,
- .report = report,
- };
- nf_ct_iterate_cleanup(net, kill_report, &fr);
+ nf_ct_iterate_cleanup(net, kill_all, NULL, portid, report);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
i_see_dead_people:
busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) {
- nf_ct_iterate_cleanup(net, kill_all, NULL);
+ nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
nf_ct_release_dying_list(net);
if (atomic_read(&net->ct.count) != 0)
busy = 1;
return ret;
}
-s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
+s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq);
EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
* published by the Free Software Foundation.
*/
-#include <linux/ctype.h>
#include <linux/export.h>
-#include <linux/jhash.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/slab.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_labels.h>
}
}
- if (del_timer(&ct->timeout)) {
- if (nf_conntrack_event_report(IPCT_DESTROY, ct,
- NETLINK_CB(skb).portid,
- nlmsg_report(nlh)) < 0) {
- nf_ct_delete_from_lists(ct);
- /* we failed to report the event, try later */
- nf_ct_dying_timeout(ct);
- nf_ct_put(ct);
- return 0;
- }
- /* death_by_timeout would report the event again */
- set_bit(IPS_DYING_BIT, &ct->status);
- nf_ct_delete_from_lists(ct);
- nf_ct_put(ct);
- }
+ if (del_timer(&ct->timeout))
+ nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
+
nf_ct_put(ct);
return 0;
return err == -EAGAIN ? -ENOBUFS : err;
}
+static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
+ [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
+ [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
+ [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
+ [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
+ [CTA_EXPECT_ID] = { .type = NLA_U32 },
+ [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
+ .len = NF_CT_HELPER_NAME_LEN - 1 },
+ [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
+ [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
+ [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
+ [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
+ [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
+};
+
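+/* ctnetlink_alloc_expect() is defined further down with the rest of
+ * the expectation handling code; declared here so the nfqueue glue
+ * below can use it.
+ */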
+static struct nf_conntrack_expect *
+ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
+ struct nf_conntrack_helper *helper,
+ struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple *mask);
+
#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
static size_t
ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
return ret;
}
+static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
+ const struct nf_conn *ct,
+ struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple *mask)
+{
+ int err;
+
+ err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
+ nf_ct_l3num(ct));
+ if (err < 0)
+ return err;
+
+ return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
+ nf_ct_l3num(ct));
+}
+
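+/* Parse a CTA_EXPECT nest received via nfqueue and register the
+ * resulting expectation on the given master conntrack.
+ */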
+static int
+ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
+ u32 portid, u32 report)
+{
+ struct nlattr *cda[CTA_EXPECT_MAX+1];
+ struct nf_conntrack_tuple tuple, mask;
+ struct nf_conntrack_helper *helper = NULL;
+ struct nf_conntrack_expect *exp;
+ int err;
+
+ err = nla_parse_nested(cda, CTA_EXPECT_MAX, attr, exp_nla_policy);
+ if (err < 0)
+ return err;
+
+ err = ctnetlink_nfqueue_exp_parse((const struct nlattr * const *)cda,
+ ct, &tuple, &mask);
+ if (err < 0)
+ return err;
+
+ if (cda[CTA_EXPECT_HELP_NAME]) {
+ const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
+
+ helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
+ nf_ct_protonum(ct));
+ if (helper == NULL)
+ return -EOPNOTSUPP;
+ }
+
+ exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
+ helper, &tuple, &mask);
+ if (IS_ERR(exp))
+ return PTR_ERR(exp);
+
+ err = nf_ct_expect_related_report(exp, portid, report);
+ if (err < 0) {
+ nf_ct_expect_put(exp);
+ return err;
+ }
+
+ return 0;
+}
+
static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
.build_size = ctnetlink_nfqueue_build_size,
.build = ctnetlink_nfqueue_build,
.parse = ctnetlink_nfqueue_parse,
+ .attach_expect = ctnetlink_nfqueue_attach_expect,
};
#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
return err;
}
-static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
- [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
- [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
- [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
- [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
- [CTA_EXPECT_ID] = { .type = NLA_U32 },
- [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
- .len = NF_CT_HELPER_NAME_LEN - 1 },
- [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
- [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
- [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
- [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
- [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
-};
-
static int
ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
#endif
}
-static int
-ctnetlink_create_expect(struct net *net, u16 zone,
- const struct nlattr * const cda[],
- u_int8_t u3,
- u32 portid, int report)
+static struct nf_conntrack_expect *
+ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
+ struct nf_conntrack_helper *helper,
+ struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple *mask)
{
- struct nf_conntrack_tuple tuple, mask, master_tuple;
- struct nf_conntrack_tuple_hash *h = NULL;
+ u_int32_t class = 0;
struct nf_conntrack_expect *exp;
- struct nf_conn *ct;
struct nf_conn_help *help;
- struct nf_conntrack_helper *helper = NULL;
- u_int32_t class = 0;
- int err = 0;
-
- /* caller guarantees that those three CTA_EXPECT_* exist */
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
- if (err < 0)
- return err;
- err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
- if (err < 0)
- return err;
- err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
- if (err < 0)
- return err;
-
- /* Look for master conntrack of this expectation */
- h = nf_conntrack_find_get(net, zone, &master_tuple);
- if (!h)
- return -ENOENT;
- ct = nf_ct_tuplehash_to_ctrack(h);
-
- /* Look for helper of this expectation */
- if (cda[CTA_EXPECT_HELP_NAME]) {
- const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
-
- helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
- nf_ct_protonum(ct));
- if (helper == NULL) {
-#ifdef CONFIG_MODULES
- if (request_module("nfct-helper-%s", helpname) < 0) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- helper = __nf_conntrack_helper_find(helpname,
- nf_ct_l3num(ct),
- nf_ct_protonum(ct));
- if (helper) {
- err = -EAGAIN;
- goto out;
- }
-#endif
- err = -EOPNOTSUPP;
- goto out;
- }
- }
+ int err;
if (cda[CTA_EXPECT_CLASS] && helper) {
class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
- if (class > helper->expect_class_max) {
- err = -EINVAL;
- goto out;
- }
+ if (class > helper->expect_class_max)
+ return ERR_PTR(-EINVAL);
}
exp = nf_ct_expect_alloc(ct);
- if (!exp) {
- err = -ENOMEM;
- goto out;
- }
+ if (!exp)
+ return ERR_PTR(-ENOMEM);
+
help = nfct_help(ct);
if (!help) {
if (!cda[CTA_EXPECT_TIMEOUT]) {
exp->class = class;
exp->master = ct;
exp->helper = helper;
- memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
- memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
- exp->mask.src.u.all = mask.src.u.all;
+ exp->tuple = *tuple;
+ exp->mask.src.u3 = mask->src.u3;
+ exp->mask.src.u.all = mask->src.u.all;
if (cda[CTA_EXPECT_NAT]) {
err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
- exp, u3);
+ exp, nf_ct_l3num(ct));
if (err < 0)
goto err_out;
}
- err = nf_ct_expect_related_report(exp, portid, report);
+ return exp;
err_out:
nf_ct_expect_put(exp);
-out:
- nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
+ return ERR_PTR(err);
+}
+
+static int
+ctnetlink_create_expect(struct net *net, u16 zone,
+ const struct nlattr * const cda[],
+ u_int8_t u3, u32 portid, int report)
+{
+ struct nf_conntrack_tuple tuple, mask, master_tuple;
+ struct nf_conntrack_tuple_hash *h = NULL;
+ struct nf_conntrack_helper *helper = NULL;
+ struct nf_conntrack_expect *exp;
+ struct nf_conn *ct;
+ int err;
+
+ /* caller guarantees that those three CTA_EXPECT_* exist */
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+ if (err < 0)
+ return err;
+ err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
+ if (err < 0)
+ return err;
+ err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
+ if (err < 0)
+ return err;
+
+ /* Look for master conntrack of this expectation */
+ h = nf_conntrack_find_get(net, zone, &master_tuple);
+ if (!h)
+ return -ENOENT;
+ ct = nf_ct_tuplehash_to_ctrack(h);
+
+ if (cda[CTA_EXPECT_HELP_NAME]) {
+ const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
+
+ helper = __nf_conntrack_helper_find(helpname, u3,
+ nf_ct_protonum(ct));
+ if (helper == NULL) {
+#ifdef CONFIG_MODULES
+ if (request_module("nfct-helper-%s", helpname) < 0) {
+ err = -EOPNOTSUPP;
+ goto err_ct;
+ }
+ helper = __nf_conntrack_helper_find(helpname, u3,
+ nf_ct_protonum(ct));
+ if (helper) {
+ err = -EAGAIN;
+ goto err_ct;
+ }
+#endif
+ err = -EOPNOTSUPP;
+ goto err_ct;
+ }
+ }
+
+ exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
+ if (IS_ERR(exp)) {
+ err = PTR_ERR(exp);
+ goto err_ct;
+ }
+
+ err = nf_ct_expect_related_report(exp, portid, report);
+ if (err < 0)
+ goto err_exp;
+
+ return 0;
+err_exp:
+ nf_ct_expect_put(exp);
+err_ct:
+ nf_ct_put(ct);
return err;
}
nf_ct_l3proto_unregister_sysctl(net, proto);
/* Remove all conntrack entries for this protocol */
- nf_ct_iterate_cleanup(net, kill_l3proto, proto);
+ nf_ct_iterate_cleanup(net, kill_l3proto, proto, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister);
nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
/* Remove all conntrack entries for this protocol */
- nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
+ nf_ct_iterate_cleanup(net, kill_l4proto, l4proto, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister);
}
#ifdef CONFIG_NF_NAT_NEEDED
-static inline s16 nat_offset(const struct nf_conn *ct,
+static inline s32 nat_offset(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq)
{
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
__u32 seq, ack, sack, end, win, swin;
- s16 receiver_offset;
+ s32 receiver_offset;
bool res, in_recv_win;
/*
rtnl_lock();
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
rtnl_unlock();
}
rtnl_lock();
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
rtnl_unlock();
}
{
struct nf_nat_proto_clean clean = {};
- nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
synchronize_rcu();
nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
}
pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
x->offset_before, x->offset_after, x->correction_pos);
-static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
-
/* Setup TCP sequence correction given this change at this sequence */
static inline void
adjust_tcp_sequence(u32 seq,
pr_debug("adjust_tcp_sequence: Seq_offset before: ");
DUMP_OFFSET(this_way);
- spin_lock_bh(&nf_nat_seqofs_lock);
+ spin_lock_bh(&ct->lock);
/* SYN adjust. If it's uninitialized, or this is after last
* correction, record it: we don't handle more than one
this_way->offset_before = this_way->offset_after;
this_way->offset_after += sizediff;
}
- spin_unlock_bh(&nf_nat_seqofs_lock);
+ spin_unlock_bh(&ct->lock);
pr_debug("adjust_tcp_sequence: Seq_offset after: ");
DUMP_OFFSET(this_way);
}
-/* Get the offset value, for conntrack */
-s16 nf_nat_get_offset(const struct nf_conn *ct,
+/* Get the offset value, for conntrack. Caller must have the conntrack locked */
+s32 nf_nat_get_offset(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq)
{
struct nf_conn_nat *nat = nfct_nat(ct);
struct nf_nat_seq *this_way;
- s16 offset;
if (!nat)
return 0;
this_way = &nat->seq[dir];
- spin_lock_bh(&nf_nat_seqofs_lock);
- offset = after(seq, this_way->correction_pos)
+ return after(seq, this_way->correction_pos)
? this_way->offset_after : this_way->offset_before;
- spin_unlock_bh(&nf_nat_seqofs_lock);
-
- return offset;
}
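
With the global seqofs lock gone, the offsets can change under ct->lock
at any time, which is why the comment above now requires callers to hold
the conntrack lock. A caller sketch, assuming tcph points at an
already-validated TCP header:

	s32 off;

	spin_lock_bh(&ct->lock);
	off = nf_nat_get_offset(ct, IP_CT_DIR_REPLY, ntohl(tcph->seq));
	spin_unlock_bh(&ct->lock);
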
/* Frobs data inside this packet, which is linear. */
}
void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- __be32 seq, s16 off)
+ __be32 seq, s32 off)
{
if (!off)
return;
struct tcphdr *tcph;
int dir;
__be32 newseq, newack;
- s16 seqoff, ackoff;
+ s32 seqoff, ackoff;
struct nf_conn_nat *nat = nfct_nat(ct);
struct nf_nat_seq *this_way, *other_way;
+ int res;
dir = CTINFO2DIR(ctinfo);
return 0;
tcph = (void *)skb->data + protoff;
+ spin_lock_bh(&ct->lock);
if (after(ntohl(tcph->seq), this_way->correction_pos))
seqoff = this_way->offset_after;
else
tcph->seq = newseq;
tcph->ack_seq = newack;
- return nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+ res = nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+ spin_unlock_bh(&ct->lock);
+
+ return res;
}
/* Setup NAT on this expected conntrack so it follows master. */
+++ /dev/null
-/*
- * Transparent proxy support for Linux/iptables
- *
- * Copyright (c) 2006-2007 BalaBit IT Ltd.
- * Author: Balazs Scheidler, Krisztian Kovacs
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-
-#include <linux/net.h>
-#include <linux/if.h>
-#include <linux/netdevice.h>
-#include <net/udp.h>
-#include <net/netfilter/nf_tproxy_core.h>
-
-
-static void
-nf_tproxy_destructor(struct sk_buff *skb)
-{
- struct sock *sk = skb->sk;
-
- skb->sk = NULL;
- skb->destructor = NULL;
-
- if (sk)
- sock_put(sk);
-}
-
-/* consumes sk */
-void
-nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
-{
- /* assigning tw sockets complicates things; most
- * skb->sk->X checks would have to test sk->sk_state first */
- if (sk->sk_state == TCP_TIME_WAIT) {
- inet_twsk_put(inet_twsk(sk));
- return;
- }
-
- skb_orphan(skb);
- skb->sk = sk;
- skb->destructor = nf_tproxy_destructor;
-}
-EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
-
-static int __init nf_tproxy_init(void)
-{
- pr_info("NF_TPROXY: Transparent proxy support initialized, version 4.1.0\n");
- pr_info("NF_TPROXY: Copyright (c) 2006-2007 BalaBit IT Ltd.\n");
- return 0;
-}
-
-module_init(nf_tproxy_init);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Krisztian Kovacs");
-MODULE_DESCRIPTION("Transparent proxy support core routines");
[NFQA_MARK] = { .type = NLA_U32 },
[NFQA_PAYLOAD] = { .type = NLA_UNSPEC },
[NFQA_CT] = { .type = NLA_UNSPEC },
+ [NFQA_EXP] = { .type = NLA_UNSPEC },
};
static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
if (entry == NULL)
return -ENOENT;
- rcu_read_lock();
- if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
+ if (nfqa[NFQA_CT]) {
ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
+ if (ct && nfqa[NFQA_EXP]) {
+ nfqnl_attach_expect(ct, nfqa[NFQA_EXP],
+ NETLINK_CB(skb).portid,
+ nlmsg_report(nlh));
+ }
+ }
if (nfqa[NFQA_PAYLOAD]) {
u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
if (ct)
nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff);
}
- rcu_read_unlock();
if (nfqa[NFQA_MARK])
entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
if ((ct->status & IPS_NAT_MASK) && diff)
nfq_nat_ct->seq_adjust(skb, ct, ctinfo, diff);
}
+
+int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+ u32 portid, u32 report)
+{
+ struct nfq_ct_hook *nfq_ct;
+
+ if (nf_ct_is_untracked(ct))
+ return 0;
+
+ nfq_ct = rcu_dereference(nfq_ct_hook);
+ if (nfq_ct == NULL)
+ return -EOPNOTSUPP;
+
+ return nfq_ct->attach_expect(attr, ct, portid, report);
+}
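
nfqnl_attach_expect() only walks the nfq_ct_hook indirection; the actual
expectation parsing is supplied by nf_conntrack_netlink. A sketch of the
provider side under that assumption (the callback name here is
hypothetical; the struct field and RCU pointer follow the existing
nfq_ct_hook conventions):

	static struct nfq_ct_hook ctnetlink_queue_hook = {
		/* build/parse/seq_adjust callbacks omitted */
		.attach_expect	= ctnetlink_queue_attach_expect,
	};

	RCU_INIT_POINTER(nfq_ct_hook, &ctnetlink_queue_hook);
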
#include <linux/ip.h>
#include <net/checksum.h>
#include <net/udp.h>
+#include <net/tcp.h>
#include <net/inet_sock.h>
+#include <net/inet_hashtables.h>
#include <linux/inetdevice.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#define XT_TPROXY_HAVE_IPV6 1
#include <net/if_inet6.h>
#include <net/addrconf.h>
+#include <net/inet6_hashtables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif
-#include <net/netfilter/nf_tproxy_core.h>
#include <linux/netfilter/xt_TPROXY.h>
+enum nf_tproxy_lookup_t {
+ NFT_LOOKUP_LISTENER,
+ NFT_LOOKUP_ESTABLISHED,
+};
+
static bool tproxy_sk_is_transparent(struct sock *sk)
{
if (sk->sk_state != TCP_TIME_WAIT) {
return laddr ? laddr : daddr;
}
+/*
+ * This is used when the user wants to intercept a connection matching
+ * an explicit iptables rule. In this case the following sockets are
+ * matched, in preference order:
+ *
+ * - match: if there's a fully established connection matching the
+ *   _packet_ tuple, it is returned, on the assumption that the
+ *   redirection already took place and we are processing a packet
+ *   that belongs to an established connection
+ *
+ * - match: if there's a listening socket matching the redirection
+ *   (e.g. on-port & on-ip of the connection), it is returned,
+ *   regardless of whether it was bound to 0.0.0.0 or to an explicit
+ *   address. The reasoning is that if there's an explicit rule, it
+ *   does not really matter whether the listener is bound to an
+ *   interface or to 0. The user already stated that they want
+ *   redirection (since they added the rule).
+ *
+ * Note that there's an overlap between what a TPROXY target and a
+ * socket match will match. Normally, if you have both rules, the
+ * "socket" match comes first, so effectively all packets belonging
+ * to established connections go through that one. (A usage sketch
+ * follows the IPv6 variant of this helper below.)
+ */
+static inline struct sock *
+nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
+ const __be32 saddr, const __be32 daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in,
+ const enum nf_tproxy_lookup_t lookup_type)
+{
+ struct sock *sk;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ switch (lookup_type) {
+ case NFT_LOOKUP_LISTENER:
+ sk = inet_lookup_listener(net, &tcp_hashinfo,
+ saddr, sport,
+ daddr, dport,
+ in->ifindex);
+
+ /* NOTE: we return listeners even if bound to
+ * 0.0.0.0, those are filtered out in
+ * xt_socket, since xt_TPROXY needs 0 bound
+ * listeners too
+ */
+ break;
+ case NFT_LOOKUP_ESTABLISHED:
+ sk = inet_lookup_established(net, &tcp_hashinfo,
+ saddr, sport, daddr, dport,
+ in->ifindex);
+ break;
+ default:
+ BUG();
+ }
+ break;
+ case IPPROTO_UDP:
+ sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ if (sk) {
+ int connected = (sk->sk_state == TCP_ESTABLISHED);
+ int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0);
+
+ /* NOTE: we return listeners even if bound to
+ * 0.0.0.0, those are filtered out in
+ * xt_socket, since xt_TPROXY needs 0 bound
+ * listeners too
+ */
+ if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
+ (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
+ sock_put(sk);
+ sk = NULL;
+ }
+ }
+ break;
+ default:
+ WARN_ON(1);
+ sk = NULL;
+ }
+
+ pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n",
+ protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);
+
+ return sk;
+}
+
+#ifdef XT_TPROXY_HAVE_IPV6
+static inline struct sock *
+nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
+ const struct in6_addr *saddr, const struct in6_addr *daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in,
+ const enum nf_tproxy_lookup_t lookup_type)
+{
+ struct sock *sk;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ switch (lookup_type) {
+ case NFT_LOOKUP_LISTENER:
+ sk = inet6_lookup_listener(net, &tcp_hashinfo,
+ saddr, sport,
+ daddr, ntohs(dport),
+ in->ifindex);
+
+ /* NOTE: we return listeners even if bound to
+ * 0.0.0.0, those are filtered out in
+ * xt_socket, since xt_TPROXY needs 0 bound
+ * listeners too
+ */
+ break;
+ case NFT_LOOKUP_ESTABLISHED:
+ sk = __inet6_lookup_established(net, &tcp_hashinfo,
+ saddr, sport, daddr, ntohs(dport),
+ in->ifindex);
+ break;
+ default:
+ BUG();
+ }
+ break;
+ case IPPROTO_UDP:
+ sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ if (sk) {
+ int connected = (sk->sk_state == TCP_ESTABLISHED);
+ int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
+
+ /* NOTE: we return listeners even if bound to
+ * 0.0.0.0, those are filtered out in
+ * xt_socket, since xt_TPROXY needs 0 bound
+ * listeners too
+ */
+ if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
+ (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
+ sock_put(sk);
+ sk = NULL;
+ }
+ }
+ break;
+ default:
+ WARN_ON(1);
+ sk = NULL;
+ }
+
+ pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n",
+ protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk);
+
+ return sk;
+}
+#endif
+
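For orientation, this is roughly how the TPROXY target drives the two
lookup modes (a condensed sketch of the existing tproxy_tg4() flow;
laddr/lport are the redirect address and port derived from the rule, and
the TIME_WAIT re-lookup is elided):

	sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol,
				   iph->saddr, iph->daddr,
				   hp->source, hp->dest,
				   skb->dev, NFT_LOOKUP_ESTABLISHED);
	if (!sk)
		sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol,
					   iph->saddr, laddr,
					   hp->source, lport,
					   skb->dev, NFT_LOOKUP_LISTENER);
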
/**
* tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
* @skb: The skb being processed.
return sk;
}
+/* assign a socket to the skb -- consumes sk */
+static void
+nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+}
+
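Using sock_edemux as the destructor also covers the TIME_WAIT case that
the deleted nf_tproxy_core handled by refusing to assign such sockets.
For reference, sock_edemux in net/core/sock.c at this point reads
approximately:

	void sock_edemux(struct sk_buff *skb)
	{
		struct sock *sk = skb->sk;

	#ifdef CONFIG_INET
		if (sk->sk_state == TCP_TIME_WAIT)
			inet_twsk_put(inet_twsk(sk));
		else
	#endif
			sock_put(sk);
	}
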
static unsigned int
tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
u_int32_t mark_mask, u_int32_t mark_value)
return -EINVAL;
}
if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {
- pr_err("ipv6 PROHIBT (THROW, NAT ..) matching not supported\n");
+ pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n");
return -EINVAL;
}
if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {
#include <net/icmp.h>
#include <net/sock.h>
#include <net/inet_sock.h>
-#include <net/netfilter/nf_tproxy_core.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#define XT_SOCKET_HAVE_IPV6 1
#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/inet6_hashtables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif
return 0;
}
+/* "socket" match based redirection (no specific rule)
+ * ===================================================
+ *
+ * There are connections with dynamic endpoints (e.g. FTP data
+ * connection) that the user is unable to add explicit rules
+ * for. These are taken care of by a generic "socket" rule. It is
+ * assumed that the proxy application is trusted to open such
+ * connections without explicit iptables rule (except of course the
+ * generic 'socket' rule). In this case the following sockets are
+ * matched in preference order:
+ *
+ * - match: if there's a fully established connection matching the
+ * _packet_ tuple
+ *
+ * - match: if there's a non-zero bound listener (possibly with a
+ * non-local address) We don't accept zero-bound listeners, since
+ * then local services could intercept traffic going through the
+ * box.
+ */
+static struct sock *
+xt_socket_get_sock_v4(struct net *net, const u8 protocol,
+ const __be32 saddr, const __be32 daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in)
+{
+ switch (protocol) {
+ case IPPROTO_TCP:
+ return __inet_lookup(net, &tcp_hashinfo,
+ saddr, sport, daddr, dport,
+ in->ifindex);
+ case IPPROTO_UDP:
+ return udp4_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ }
+ return NULL;
+}
+
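The zero-bound restriction described in the comment above is enforced
after this lookup, in socket_match(). A condensed sketch of that check
(mirroring the wildcard/transparent locals visible below; TIME_WAIT
minisocks carry no rcv_saddr and are exempt):

	wildcard = (sk->sk_state != TCP_TIME_WAIT &&
		    inet_sk(sk)->inet_rcv_saddr == 0);
	if (wildcard || !transparent)
		sk = NULL;	/* treated as no match */
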
static bool
socket_match(const struct sk_buff *skb, struct xt_action_param *par,
const struct xt_socket_mtinfo1 *info)
#endif
if (!sk)
- sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
+ sk = xt_socket_get_sock_v4(dev_net(skb->dev), protocol,
saddr, daddr, sport, dport,
- par->in, NFT_LOOKUP_ANY);
+ par->in);
if (sk) {
bool wildcard;
bool transparent = true;
return 0;
}
+static struct sock *
+xt_socket_get_sock_v6(struct net *net, const u8 protocol,
+ const struct in6_addr *saddr, const struct in6_addr *daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in)
+{
+ switch (protocol) {
+ case IPPROTO_TCP:
+ return inet6_lookup(net, &tcp_hashinfo,
+ saddr, sport, daddr, dport,
+ in->ifindex);
+ case IPPROTO_UDP:
+ return udp6_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ }
+
+ return NULL;
+}
+
static bool
socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
{
}
if (!sk)
- sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
+ sk = xt_socket_get_sock_v6(dev_net(skb->dev), tproto,
saddr, daddr, sport, dport,
- par->in, NFT_LOOKUP_ANY);
+ par->in);
if (sk) {
bool wildcard;
bool transparent = true;
struct net *net = sock_net(skb->sk);
int chains_to_skip = cb->args[0];
int fams_to_skip = cb->args[1];
- bool need_locking = chains_to_skip || fams_to_skip;
-
- if (need_locking)
- genl_lock();
for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
n = 0;
cb->args[0] = i;
cb->args[1] = n;
- if (need_locking)
- genl_unlock();
-
return skb->len;
}
if (po->tp_version == TPACKET_V3) {
lv = sizeof(struct tpacket_stats_v3);
+ st.stats3.tp_packets += st.stats3.tp_drops;
data = &st.stats3;
} else {
lv = sizeof(struct tpacket_stats);
+ st.stats1.tp_packets += st.stats1.tp_drops;
data = &st.stats1;
}
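
Folding tp_drops into tp_packets makes the counter report everything the
socket saw, not just what reached the ring. From userspace the read is
unchanged; only the arithmetic matters: with 90 delivered frames and 10
drops, PACKET_STATISTICS now yields tp_packets = 100 and tp_drops = 10,
so delivered = tp_packets - tp_drops:

	struct tpacket_stats st;
	socklen_t len = sizeof(st);

	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
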
if (sp == asoc->peer.primary_path)
printl("*");
- if (sp->ipaddr.sa.sa_family == AF_INET)
- printl("%pI4 ", &sp->ipaddr.v4.sin_addr);
- else
- printl("%pI6 ", &sp->ipaddr.v6.sin6_addr);
-
- printl("%2u %8u %8u %8u %8u %8u ",
- sp->state, sp->cwnd, sp->ssthresh,
+ printl("%pISc %2u %8u %8u %8u %8u %8u ",
+ &sp->ipaddr, sp->state, sp->cwnd, sp->ssthresh,
sp->flight_size, sp->partial_bytes_acked,
sp->pathmtu);
}
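
The %pIS format extension prints a struct sockaddr and selects the
presentation from sa_family, with the 'c' suffix requesting compressed
IPv6 notation; that is what lets a single format string replace the
AF_INET/AF_INET6 branch above. A minimal illustration:

	/* prints e.g. "192.0.2.1" or "2001:db8::1" */
	pr_debug("peer %pISc\n", &sp->ipaddr);
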
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_NEW_KEY);
- if (IS_ERR(hdr))
- return PTR_ERR(hdr);
+ if (!hdr)
+ return -ENOBUFS;
cookie.msg = msg;
cookie.idx = key_idx;
NL80211_CMD_TESTMODE);
struct nlattr *tmdata;
+ if (!hdr)
+ break;
+
if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
genlmsg_cancel(skb, hdr);
break;
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_REMAIN_ON_CHANNEL);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_FRAME);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
}
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_PROBE_CLIENT);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
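
All four nl80211 hunks above fix the same bug: nl80211hdr_put() wraps
genlmsg_put(), which returns NULL when the message has no room for the
header, never an ERR_PTR, so the old IS_ERR() tests could never fire and
a failed allocation would have been dereferenced. The corrected pattern
(cmd standing in for the per-call command):

	hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, cmd);
	if (!hdr)
		return -ENOBUFS;	/* genlmsg_put() returned NULL */
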
struct net_device *dev, u16 reason, bool wextev)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err;
+ int err = 0;
ASSERT_WDEV_LOCK(wdev);
kfree(wdev->connect_keys);
wdev->connect_keys = NULL;
- if (wdev->conn) {
+ if (wdev->conn)
err = cfg80211_sme_disconnect(wdev, reason);
- } else if (!rdev->ops->disconnect) {
+ else if (!rdev->ops->disconnect)
cfg80211_mlme_down(rdev, dev);
- err = 0;
- } else {
+ else if (wdev->current_bss)
err = rdev_disconnect(rdev, dev, reason);
- }
return err;
}
{
BUG_ON(!policy->walk.dead);
- if (del_timer(&policy->timer))
+ if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
BUG();
security_xfrm_policy_free(policy->security);
	 * have the xfrm_states. We need to wait for the KM to
	 * negotiate new SAs or bail out with an error. */
if (net->xfrm.sysctl_larval_drop) {
- /* EREMOTE tells the caller to generate
- * a one-shot blackhole route. */
dst_release(dst);
xfrm_pols_put(pols, drop_pols);
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
INIT_HLIST_NODE(&x->bydst);
INIT_HLIST_NODE(&x->bysrc);
INIT_HLIST_NODE(&x->byspi);
- tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
+ CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
setup_timer(&x->rtimer, xfrm_replay_timer_handler,
(unsigned long)x);
x->curlft.add_time = get_seconds();
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
-static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
+static struct xfrm_state *__find_acq_core(struct net *net,
+ const struct xfrm_mark *m,
unsigned short family, u8 mode,
u32 reqid, u8 proto,
const xfrm_address_t *daddr,
- const xfrm_address_t *saddr, int create)
+ const xfrm_address_t *saddr,
+ int create)
{
unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
struct xfrm_state *x;
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
-xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto,
- const xfrm_address_t *daddr, const xfrm_address_t *saddr,
- int create, unsigned short family)
+xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
+ u8 proto, const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr, int create, unsigned short family)
{
struct xfrm_state *x;