S: Canada
N: Zwane Mwaikambo
-E: zwane@linuxpower.ca
-W: http://function.linuxpower.ca
+E: zwane@arm.linux.org.uk
D: Various driver hacking
D: Lowlevel x86 kernel hacking
D: General debugging
attached to /proc/acpi/hotkey/poll_method, which is dynamically
created. Please use the command "cat /proc/acpi/hotkey/polling_method"
to retrieve it.
+
+Note: Use cmdline "acpi_generic_hotkey" to over-ride
+loading any platform specific drivers.
acpi_fake_ecdt [HW,ACPI] Workaround failure due to BIOS lacking ECDT
+ acpi_generic_hotkey [HW,ACPI]
+ Allow consolidated generic hotkey driver to
+ over-ride platform specific driver.
+ See also Documentation/acpi-hotkey.txt.
+
ad1816= [HW,OSS]
Format: <io>,<irq>,<dma>,<dma2>
See also Documentation/sound/oss/AD1816.
in the kernel as they aren't compatible with hotplug or PCI domains or
having sane locking.
-pcibios_present() and          Since ages, you don't need to test presence
-pci_present()                  of PCI subsystem when trying to talk to it.
-                               If it's not there, the list of PCI devices
-                               is empty and all functions for searching for
-                               devices just return NULL.
-pcibios_(read|write)_*         Superseded by their pci_(read|write)_*
-                               counterparts.
-pcibios_find_*                 Superseded by their pci_get_* counterparts.
-pci_for_each_dev()             Superseded by pci_get_device()
-pci_for_each_dev_reverse()     Superseded by pci_find_device_reverse()
-pci_for_each_bus()             Superseded by pci_find_next_bus()
pci_find_device()               Superseded by pci_get_device()
pci_find_subsys()               Superseded by pci_get_subsys()
pci_find_slot()                 Superseded by pci_get_slot()
-pcibios_find_class()           Superseded by pci_get_class()
-pci_find_class()               Superseded by pci_get_class()
-pci_(read|write)_*_nodev()     Superseded by pci_bus_(read|write)_*()
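
As an illustration of the replacement API (a minimal sketch, not part of this
patch): pci_get_device() returns the next matching device with its reference
count raised, so the loop below is the usual conversion of an old
pci_find_device() scan.  The PCI_ANY_ID wildcards and the function name are
placeholders, not IDs taken from any driver touched here.

	#include <linux/pci.h>

	static void example_scan(void)
	{
		struct pci_dev *dev = NULL;

		/*
		 * pci_get_device() drops the reference held on 'from' and
		 * returns the next match with its refcount raised, so no
		 * device reference is leaked while iterating to completion.
		 */
		while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
			/* ... use dev here ... */
		}

		/*
		 * If the loop is left early (break/return), drop the last
		 * reference manually with pci_dev_put(dev).
		 */
	}
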
P: LinuxTV.org Project
M: linux-dvb-maintainer@linuxtv.org
L: linux-dvb@linuxtv.org (subscription required)
-W: http://linuxtv.org/developer/dvb.xml
+W: http://linuxtv.org/
S: Supported
EATA-DMA SCSI DRIVER
OPL3-SA2, SA3, and SAx DRIVER
P: Zwane Mwaikambo
-M: zwane@commfireservices.com
+M: zwane@arm.linux.org.uk
L: linux-sound@vger.kernel.org
S: Maintained
M: greg@kroah.com
S: Maintained
+PCIE HOTPLUG DRIVER
+P: Kristen Carlson Accardi
+M: kristen.c.accardi@intel.com
+L: pcihpd-discuss@lists.sourceforge.net
+S: Maintained
+
PCMCIA SUBSYSTEM
P: Linux PCMCIA Team
L: http://lists.infradead.org/mailman/listinfo/linux-pcmcia
SC1200 WDT DRIVER
P: Zwane Mwaikambo
-M: zwane@commfireservices.com
+M: zwane@arm.linux.org.uk
S: Maintained
SCHEDULER
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
S: Maintained
+SHPC HOTPLUG DRIVER
+P: Kristen Carlson Accardi
+M: kristen.c.accardi@intel.com
+L: pcihpd-discuss@lists.sourceforge.net
+S: Maintained
+
SPARC (sparc32):
P: William L. Irwin
M: wli@holomorphy.com
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 13
-EXTRAVERSION =-rc6
+EXTRAVERSION =
NAME=Woozy Numbat
# *DOCUMENTATION*
config NUMA
bool "NUMA Support (EXPERIMENTAL)"
- depends on DISCONTIGMEM
+ depends on DISCONTIGMEM && BROKEN
help
Say Y to compile the kernel to support NUMA (Non-Uniform Memory
Access). This option is for configuring high-end multiprocessor
" br 1b\n"
".previous"
: "=r" (tmp), "=m" (lock->lock), "=r" (stuck)
- : "1" (lock->lock), "2" (stuck) : "memory");
+ : "m" (lock->lock), "2" (stuck) : "memory");
if (stuck < 0) {
printk(KERN_WARNING
".previous"
: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
"=&r" (stuck_lock), "=&r" (stuck_reader)
- : "0" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory");
+ : "m" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory");
if (stuck_lock < 0) {
printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
" br 1b\n"
".previous"
: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
- : "0" (*(volatile int *)lock), "2" (stuck_lock) : "memory");
+ : "m" (*(volatile int *)lock), "2" (stuck_lock) : "memory");
if (stuck_lock < 0) {
printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
	model->reg_setup(&reg, ctr, &sys);
	/* Configure the registers on all cpus. */
-	smp_call_function(model->cpu_setup, &reg, 0, 1);
+	(void)smp_call_function(model->cpu_setup, &reg, 0, 1);
	model->cpu_setup(&reg);
return 0;
}
static int
op_axp_start(void)
{
- smp_call_function(op_axp_cpu_start, NULL, 0, 1);
+ (void)smp_call_function(op_axp_cpu_start, NULL, 0, 1);
op_axp_cpu_start(NULL);
return 0;
}
static void
op_axp_stop(void)
{
- smp_call_function(op_axp_cpu_stop, NULL, 0, 1);
+ (void)smp_call_function(op_axp_cpu_stop, NULL, 0, 1);
op_axp_cpu_stop(NULL);
}
config SMP
bool "Symmetric Multi-Processing (EXPERIMENTAL)"
- depends on EXPERIMENTAL #&& n
+ depends on EXPERIMENTAL && BROKEN #&& n
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
/* 310 */ .long sys_request_key
.long sys_keyctl
.long sys_semtimedop
+/* vserver */ .long sys_ni_syscall
+ .long sys_ioprio_set
+/* 315 */ .long sys_ioprio_get
+ .long sys_inotify_init
+ .long sys_inotify_add_watch
+ .long sys_inotify_rm_watch
__syscall_end:
.rept NR_syscalls - (__syscall_end - __syscall_start) / 4
notify_die("unknown data abort code", regs, &info, instr, 0);
}
-volatile void __bug(const char *file, int line, void *data)
+void __attribute__((noreturn)) __bug(const char *file, int line, void *data)
{
printk(KERN_CRIT"kernel BUG at %s:%d!", file, line);
if (data)
static struct resource coyote_flash_resource = {
.start = COYOTE_FLASH_BASE,
- .end = COYOTE_FLASH_BASE + COYOTE_FLASH_SIZE,
+ .end = COYOTE_FLASH_BASE + COYOTE_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
};
static struct resource gtwx5715_flash_resource = {
.start = GTWX5715_FLASH_BASE,
- .end = GTWX5715_FLASH_BASE + GTWX5715_FLASH_SIZE,
+ .end = GTWX5715_FLASH_BASE + GTWX5715_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
};
static struct resource ixdp425_flash_resource = {
.start = IXDP425_FLASH_BASE,
- .end = IXDP425_FLASH_BASE + IXDP425_FLASH_SIZE,
+ .end = IXDP425_FLASH_BASE + IXDP425_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
};
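
The three flash-resource hunks above all apply the same correction:
struct resource describes an inclusive [start, end] byte range, so a region
of SIZE bytes starting at BASE ends at BASE + SIZE - 1.  A minimal sketch of
the convention (EXAMPLE_FLASH_BASE and EXAMPLE_FLASH_SIZE are hypothetical
values, not taken from any of these boards):

	#include <linux/ioport.h>

	#define EXAMPLE_FLASH_BASE	0x50000000	/* hypothetical base */
	#define EXAMPLE_FLASH_SIZE	0x01000000	/* hypothetical 16 MiB */

	static struct resource example_flash_resource = {
		.start	= EXAMPLE_FLASH_BASE,
		.end	= EXAMPLE_FLASH_BASE + EXAMPLE_FLASH_SIZE - 1,	/* inclusive */
		.flags	= IORESOURCE_MEM,
	};

	/* Length in bytes of an inclusive [start, end] range. */
	static unsigned long example_resource_len(const struct resource *res)
	{
		return res->end - res->start + 1;
	}
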
* 28-Sep-2004 BJD Updates for new serial port bits
* 04-Nov-2004 BJD Updated UART configuration process
* 10-Jan-2005 BJD Removed s3c2410_clock_tick_rate
+ * 13-Aug-2005 DA Removed UART from initial I/O mappings
*/
#include <linux/kernel.h>
IODESC_ENT(USBHOST),
IODESC_ENT(CLKPWR),
IODESC_ENT(LCD),
- IODESC_ENT(UART),
IODESC_ENT(TIMER),
IODESC_ENT(ADC),
- IODESC_ENT(WATCHDOG)
+ IODESC_ENT(WATCHDOG),
};
static struct resource s3c_uart0_resource[] = {
config CPU_DCACHE_WRITETHROUGH
bool "Force write through D-cache"
- depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020) && !CPU_DISABLE_DCACHE
+ depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020) && !CPU_DCACHE_DISABLE
default y if CPU_ARM925T
help
Say Y here to use the data cache in writethrough mode. Unless you
ENTRY(cpu_v6_switch_mm)
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
- mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
+ mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
mcr p15, 0, r1, c13, c0, 1 @ set context ID
#define getRoundingMode(opcode) ((opcode & MASK_ROUNDING_MODE) >> 5)
#ifdef CONFIG_FPE_NWFPE_XP
-static inline const floatx80 getExtendedConstant(const unsigned int nIndex)
+static inline __attribute_pure__ floatx80 getExtendedConstant(const unsigned int nIndex)
{
extern const floatx80 floatx80Constant[];
return floatx80Constant[nIndex];
}
#endif
-static inline const float64 getDoubleConstant(const unsigned int nIndex)
+static inline __attribute_pure__ float64 getDoubleConstant(const unsigned int nIndex)
{
extern const float64 float64Constant[];
return float64Constant[nIndex];
}
-static inline const float32 getSingleConstant(const unsigned int nIndex)
+static inline __attribute_pure__ float32 getSingleConstant(const unsigned int nIndex)
{
extern const float32 float32Constant[];
return float32Constant[nIndex];
if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) )
|| ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) )
) {
- if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) {
- float_raise( float_flag_invalid );
- }
+ /* Do nothing, even if NaN as we're quiet */
return 0;
}
aSign = extractFloat32Sign( a );
if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) )
|| ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) )
) {
- if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) {
- float_raise( float_flag_invalid );
- }
+ /* Do nothing, even if NaN as we're quiet */
return 0;
}
aSign = extractFloat32Sign( a );
if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) )
|| ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) )
) {
- if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) {
- float_raise( float_flag_invalid );
- }
+ /* Do nothing, even if NaN as we're quiet */
return 0;
}
aSign = extractFloat64Sign( a );
if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) )
|| ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) )
) {
- if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) {
- float_raise( float_flag_invalid );
- }
+ /* Do nothing, even if NaN as we're quiet */
return 0;
}
aSign = extractFloat64Sign( a );
) {
if ( floatx80_is_signaling_nan( a )
|| floatx80_is_signaling_nan( b ) ) {
- roundData->exception |= float_flag_invalid;
+ float_raise( float_flag_invalid );
}
return 0;
}
|| ( ( extractFloatx80Exp( b ) == 0x7FFF )
&& (bits64) ( extractFloatx80Frac( b )<<1 ) )
) {
- roundData->exception |= float_flag_invalid;
+ float_raise( float_flag_invalid );
return 0;
}
aSign = extractFloatx80Sign( a );
|| ( ( extractFloatx80Exp( b ) == 0x7FFF )
&& (bits64) ( extractFloatx80Frac( b )<<1 ) )
) {
- roundData->exception |= float_flag_invalid;
+ float_raise( float_flag_invalid );
return 0;
}
aSign = extractFloatx80Sign( a );
|| ( ( extractFloatx80Exp( b ) == 0x7FFF )
&& (bits64) ( extractFloatx80Frac( b )<<1 ) )
) {
- roundData->exception |= float_flag_invalid;
+ float_raise( float_flag_invalid );
return 0;
}
return
|| ( ( extractFloatx80Exp( b ) == 0x7FFF )
&& (bits64) ( extractFloatx80Frac( b )<<1 ) )
) {
- if ( floatx80_is_signaling_nan( a )
- || floatx80_is_signaling_nan( b ) ) {
- roundData->exception |= float_flag_invalid;
- }
+ /* Do nothing, even if NaN as we're quiet */
return 0;
}
aSign = extractFloatx80Sign( a );
|| ( ( extractFloatx80Exp( b ) == 0x7FFF )
&& (bits64) ( extractFloatx80Frac( b )<<1 ) )
) {
- if ( floatx80_is_signaling_nan( a )
- || floatx80_is_signaling_nan( b ) ) {
- roundData->exception |= float_flag_invalid;
- }
+ /* Do nothing, even if NaN as we're quiet */
return 0;
}
aSign = extractFloatx80Sign( a );
static int __init detect_init_APIC (void)
{
u32 h, l, features;
- extern void get_cpu_vendor(struct cpuinfo_x86*);
/* Disabled by kernel option? */
if (enable_local_apic < 0)
return -1;
- /* Workaround for us being called before identify_cpu(). */
- get_cpu_vendor(&boot_cpu_data);
-
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
wrmsr(MSR_P6_EVNTSEL0, 0, 0);
break;
case 15:
- if (boot_cpu_data.x86_model > 0x3)
+ if (boot_cpu_data.x86_model > 0x4)
break;
wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
setup_p6_watchdog();
break;
case 15:
- if (boot_cpu_data.x86_model > 0x3)
+ if (boot_cpu_data.x86_model > 0x4)
return;
if (!setup_p4_watchdog())
*/
cwd = get_fpu_cwd(task);
swd = get_fpu_swd(task);
- switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
+ switch (swd & ~cwd & 0x3f) {
case 0x000:
default:
break;
case 0x001: /* Invalid Op */
- case 0x041: /* Stack Fault */
- case 0x241: /* Stack Fault | Direction */
+ /*
+ * swd & 0x240 == 0x040: Stack Underflow
+ * swd & 0x240 == 0x240: Stack Overflow
+ * User must clear the SF bit (0x40) if set
+ */
info.si_code = FPE_FLTINV;
- /* Should we clear the SF or let user space do it ???? */
break;
case 0x002: /* Denormalize */
case 0x010: /* Underflow */
endif
+if PM
+
+source "arch/ia64/kernel/cpufreq/Kconfig"
+
+endif
+
endmenu
if !IA64_HP_SIM
config PCI
bool "PCI support"
help
- Find out whether you have a PCI motherboard. PCI is the name of a
- bus system, i.e. the way the CPU talks to the other stuff inside
- your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
- VESA. If you have PCI, say Y, otherwise N.
-
- The PCI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, contains valuable
- information about which PCI hardware does work under Linux and which
- doesn't.
+ Real IA-64 machines all have PCI/PCI-X/PCI Express busses. Say Y
+ here unless you are using a simulator without PCI support.
config PCI_DOMAINS
bool
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.10
-# Mon Jan 10 13:57:35 2005
+# Linux kernel version: 2.6.13-rc6
+# Tue Aug 16 14:40:41 2005
#
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
#
# General setup
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=20
CONFIG_HOTPLUG=y
CONFIG_KOBJECT_UEVENT=y
# CONFIG_IKCONFIG is not set
+CONFIG_CPUSETS=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
-CONFIG_CPUSETS=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SHMEM=y
CONFIG_CC_ALIGN_FUNCTIONS=0
CONFIG_CC_ALIGN_LABELS=0
CONFIG_CC_ALIGN_LOOPS=0
CONFIG_CC_ALIGN_JUMPS=0
# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
#
# Loadable module support
CONFIG_TIME_INTERPOLATION=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_IA64_UNCACHED_ALLOCATOR=y
# CONFIG_IA64_GENERIC is not set
# CONFIG_IA64_DIG is not set
# CONFIG_IA64_HP_ZX1 is not set
+# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
CONFIG_IA64_SGI_SN2=y
# CONFIG_IA64_HP_SIM is not set
# CONFIG_ITANIUM is not set
# CONFIG_IA64_PAGE_SIZE_8KB is not set
CONFIG_IA64_PAGE_SIZE_16KB=y
# CONFIG_IA64_PAGE_SIZE_64KB is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
CONFIG_IA64_L1_CACHE_SHIFT=7
CONFIG_NUMA=y
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
# CONFIG_IA64_CYCLONE is not set
CONFIG_IOSAPIC=y
+CONFIG_IA64_SGI_SN_XP=m
CONFIG_FORCE_MAX_ZONEORDER=18
CONFIG_SMP=y
CONFIG_NR_CPUS=512
# CONFIG_HOTPLUG_CPU is not set
+CONFIG_SCHED_SMT=y
CONFIG_PREEMPT=y
+CONFIG_SELECT_MEMORY_MODEL=y
+# CONFIG_FLATMEM_MANUAL is not set
+CONFIG_DISCONTIGMEM_MANUAL=y
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_DISCONTIGMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_NEED_MULTIPLE_NODES=y
CONFIG_HAVE_DEC_LOCK=y
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
#
# Power management and ACPI
#
+CONFIG_PM=y
CONFIG_ACPI=y
#
CONFIG_ACPI_INTERPRETER=y
# CONFIG_ACPI_BUTTON is not set
CONFIG_ACPI_VIDEO=m
+CONFIG_ACPI_HOTKEY=m
# CONFIG_ACPI_FAN is not set
# CONFIG_ACPI_PROCESSOR is not set
CONFIG_ACPI_NUMA=y
# CONFIG_PCI_MSI is not set
CONFIG_PCI_LEGACY_PROC=y
CONFIG_PCI_NAMES=y
+# CONFIG_PCI_DEBUG is not set
#
# PCI Hotplug Support
# CONFIG_HOTPLUG_PCI_FAKE is not set
# CONFIG_HOTPLUG_PCI_ACPI is not set
# CONFIG_HOTPLUG_PCI_CPCI is not set
-# CONFIG_HOTPLUG_PCI_PCIE is not set
# CONFIG_HOTPLUG_PCI_SHPC is not set
CONFIG_HOTPLUG_PCI_SGI=y
# CONFIG_PCCARD is not set
#
-# PC-card bridges
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_IP_TCPDIAG=y
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+CONFIG_IPV6=m
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
#
# Device Drivers
#
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=m
+CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
#
# CONFIG_BLK_CPQ_CISS_DA is not set
# CONFIG_BLK_DEV_DAC960 is not set
# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
# CONFIG_BLK_DEV_HPT366 is not set
# CONFIG_BLK_DEV_SC1200 is not set
# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT821X is not set
# CONFIG_BLK_DEV_NS87415 is not set
# CONFIG_BLK_DEV_PDC202XX_OLD is not set
# CONFIG_BLK_DEV_PDC202XX_NEW is not set
CONFIG_BLK_DEV_SR=m
# CONFIG_BLK_DEV_SR_VENDOR is not set
CONFIG_CHR_DEV_SG=m
+CONFIG_CHR_DEV_SCH=m
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
# CONFIG_SCSI_ATA_PIIX is not set
# CONFIG_SCSI_SATA_NV is not set
# CONFIG_SCSI_SATA_PROMISE is not set
+# CONFIG_SCSI_SATA_QSTOR is not set
# CONFIG_SCSI_SATA_SX4 is not set
# CONFIG_SCSI_SATA_SIL is not set
# CONFIG_SCSI_SATA_SIS is not set
# CONFIG_SCSI_SATA_ULI is not set
# CONFIG_SCSI_SATA_VIA is not set
CONFIG_SCSI_SATA_VITESSE=y
-# CONFIG_SCSI_BUSLOGIC is not set
# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_PIO is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
-# CONFIG_SCSI_GDTH is not set
# CONFIG_SCSI_IPS is not set
# CONFIG_SCSI_INITIO is not set
# CONFIG_SCSI_INIA100 is not set
# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_QLOGIC_ISP is not set
# CONFIG_SCSI_QLOGIC_FC is not set
CONFIG_SCSI_QLOGIC_1280=y
# CONFIG_SCSI_QLOGIC_1280_1040 is not set
CONFIG_SCSI_QLA2300=y
CONFIG_SCSI_QLA2322=y
# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_QLA24XX is not set
+# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_DEBUG is not set
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_EMC=m
#
# Fusion MPT device support
#
CONFIG_FUSION=y
+CONFIG_FUSION_SPI=y
+CONFIG_FUSION_FC=y
CONFIG_FUSION_MAX_SGE=128
CONFIG_FUSION_CTL=m
# CONFIG_I2O is not set
#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
-CONFIG_NETLINK_DEV=y
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-# CONFIG_IP_ADVANCED_ROUTER is not set
-# CONFIG_IP_PNP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_IP_MROUTE is not set
-# CONFIG_ARPD is not set
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_TUNNEL is not set
-CONFIG_IP_TCPDIAG=y
-# CONFIG_IP_TCPDIAG_IPV6 is not set
-CONFIG_IPV6=m
-# CONFIG_IPV6_PRIVACY is not set
-# CONFIG_INET6_AH is not set
-# CONFIG_INET6_ESP is not set
-# CONFIG_INET6_IPCOMP is not set
-# CONFIG_INET6_TUNNEL is not set
-# CONFIG_IPV6_TUNNEL is not set
-# CONFIG_NETFILTER is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
-
-#
-# Network testing
+# Network device support
#
-# CONFIG_NET_PKTGEN is not set
-CONFIG_NETPOLL=y
-# CONFIG_NETPOLL_RX is not set
-# CONFIG_NETPOLL_TRAP is not set
-CONFIG_NET_POLL_CONTROLLER=y
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
-# CONFIG_ETHERTAP is not set
#
# ARCnet devices
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_R8169 is not set
+# CONFIG_SKGE is not set
# CONFIG_SK98LIN is not set
CONFIG_TIGON3=y
+# CONFIG_BNX2 is not set
#
# Ethernet (10000 Mbit)
# CONFIG_NET_FC is not set
# CONFIG_SHAPER is not set
CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
#
# ISDN subsystem
# CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set
-#
-# Input I/O drivers
-#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-# CONFIG_SERIO is not set
-# CONFIG_SERIO_I8042 is not set
-
#
# Input Device Drivers
#
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
#
# Character devices
#
# CONFIG_CYCLADES is not set
# CONFIG_MOXA_SMARTIO is not set
# CONFIG_ISI is not set
-# CONFIG_SYNCLINK is not set
# CONFIG_SYNCLINKMP is not set
# CONFIG_N_HDLC is not set
+# CONFIG_SPECIALIX is not set
+# CONFIG_SX is not set
# CONFIG_STALDRV is not set
CONFIG_SGI_SNSC=y
CONFIG_SGI_TIOCX=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_SGI_L1_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
CONFIG_SERIAL_SGI_IOC4=y
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_RAW_DRIVER=m
# CONFIG_HPET is not set
CONFIG_MAX_RAW_DEVS=256
+# CONFIG_HANGCHECK_TIMER is not set
CONFIG_MMTIMER=y
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
#
# I2C support
#
# CONFIG_I2C is not set
+# CONFIG_I2C_SENSOR is not set
#
# Dallas's 1-wire bus
#
# CONFIG_W1 is not set
+#
+# Hardware Monitoring support
+#
+# CONFIG_HWMON is not set
+
#
# Misc devices
#
#
# USB support
#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB=m
# CONFIG_USB_DEBUG is not set
# CONFIG_USB_DEVICEFS is not set
# CONFIG_USB_BANDWIDTH is not set
# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_SUSPEND is not set
# CONFIG_USB_OTG is not set
-CONFIG_USB_ARCH_HAS_HCD=y
-CONFIG_USB_ARCH_HAS_OHCI=y
#
# USB Host Controller Drivers
CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_ISP116X_HCD is not set
CONFIG_USB_OHCI_HCD=m
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_UHCI_HCD=m
# CONFIG_USB_SL811_HCD is not set
# CONFIG_USB_MOUSE is not set
# CONFIG_USB_AIPTEK is not set
# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
# CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set
# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_ITMTOUCH is not set
# CONFIG_USB_EGALAX is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
#
# USB Imaging devices
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
#
# USB port drivers
# CONFIG_USB_CYTHERM is not set
# CONFIG_USB_PHIDGETKIT is not set
# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
#
-# USB ATM/DSL drivers
+# USB DSL modem support
#
#
# InfiniBand support
#
CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_VERBS=m
CONFIG_INFINIBAND_MTHCA=m
# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
CONFIG_INFINIBAND_IPOIB=m
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
+
+#
+# XFS support
+#
CONFIG_XFS_FS=y
+CONFIG_XFS_EXPORT=y
CONFIG_XFS_RT=y
CONFIG_XFS_QUOTA=y
# CONFIG_XFS_SECURITY is not set
CONFIG_XFS_POSIX_ACL=y
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
CONFIG_QUOTA=y
# CONFIG_QFMT_V1 is not set
# CONFIG_QFMT_V2 is not set
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
# CONFIG_DEVPTS_FS_XATTR is not set
CONFIG_TMPFS=y
CONFIG_TMPFS_XATTR=y
#
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
CONFIG_NFS_V4=y
CONFIG_NFS_DIRECTIO=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
CONFIG_NFSD_V4=y
CONFIG_NFSD_TCP=y
CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
+CONFIG_EXPORTFS=y
+CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
CONFIG_RPCSEC_GSS_KRB5=m
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=m
CONFIG_ZLIB_DEFLATE=m
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
#
# Profiling support
#
# Kernel hacking
#
+# CONFIG_PRINTK_TIME is not set
CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOG_BUF_SHIFT=20
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
+CONFIG_DEBUG_PREEMPT=y
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_FS is not set
+# CONFIG_KPROBES is not set
CONFIG_IA64_GRANULE_16MB=y
# CONFIG_IA64_GRANULE_64MB is not set
# CONFIG_IA64_PRINT_HAZARDS is not set
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_NULL is not set
# CONFIG_CRYPTO_MD4 is not set
-CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=m
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
CONFIG_CRYPTO_DES=m
# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_TWOFISH is not set
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.13-rc1-20050629
-# Wed Jun 29 15:28:12 2005
+# Linux kernel version: 2.6.13-rc6-tiger-smp
+# Wed Aug 17 10:19:51 2005
#
#
CONFIG_ACPI_INTERPRETER=y
CONFIG_ACPI_BUTTON=m
# CONFIG_ACPI_VIDEO is not set
+# CONFIG_ACPI_HOTKEY is not set
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
# CONFIG_ACPI_HOTPLUG_CPU is not set
#
# CONFIG_PCCARD is not set
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+CONFIG_ARPD=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_IP_TCPDIAG=y
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+
#
# Device Drivers
#
#
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
+CONFIG_FW_LOADER=m
# CONFIG_DEBUG_DRIVER is not set
#
CONFIG_SCSI_QLA2300=m
CONFIG_SCSI_QLA2322=m
# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_QLA24XX is not set
# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_I2O is not set
#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
+# Network device support
#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_FIB_HASH=y
-# CONFIG_IP_PNP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_IP_MROUTE is not set
-CONFIG_ARPD=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_TUNNEL is not set
-CONFIG_IP_TCPDIAG=y
-# CONFIG_IP_TCPDIAG_IPV6 is not set
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_BIC=y
-# CONFIG_IPV6 is not set
-# CONFIG_NETFILTER is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-CONFIG_NETPOLL=y
-# CONFIG_NETPOLL_RX is not set
-# CONFIG_NETPOLL_TRAP is not set
-CONFIG_NET_POLL_CONTROLLER=y
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
# CONFIG_BONDING is not set
# CONFIG_NET_FC is not set
# CONFIG_SHAPER is not set
CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
#
# ISDN subsystem
CONFIG_DRM_RADEON=m
CONFIG_DRM_MGA=m
CONFIG_DRM_SIS=m
+# CONFIG_DRM_VIA is not set
CONFIG_RAW_DRIVER=m
CONFIG_HPET=y
# CONFIG_HPET_RTC_IRQ is not set
# I2C support
#
# CONFIG_I2C is not set
+# CONFIG_I2C_SENSOR is not set
#
# Dallas's 1-wire bus
#
# CONFIG_W1 is not set
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
#
# Misc devices
#
# CONFIG_USB_EGALAX is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
#
# USB Imaging devices
# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_IDMOUSE is not set
# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
# CONFIG_USB_TEST is not set
#
# CONFIG_XFS_POSIX_ACL is not set
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
CONFIG_AUTOFS_FS=y
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.13-rc1-20050629
-# Wed Jun 29 15:31:11 2005
+# Linux kernel version: 2.6.13-rc6
+# Wed Aug 17 10:02:43 2005
#
#
CONFIG_ACPI_INTERPRETER=y
CONFIG_ACPI_BUTTON=y
CONFIG_ACPI_VIDEO=m
+CONFIG_ACPI_HOTKEY=m
CONFIG_ACPI_FAN=y
CONFIG_ACPI_PROCESSOR=y
CONFIG_ACPI_THERMAL=y
#
# CONFIG_PCCARD is not set
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_IP_TCPDIAG is not set
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_IP_NF_CONNTRACK is not set
+# CONFIG_IP_NF_CONNTRACK_MARK is not set
+# CONFIG_IP_NF_QUEUE is not set
+# CONFIG_IP_NF_IPTABLES is not set
+CONFIG_IP_NF_ARPTABLES=y
+# CONFIG_IP_NF_ARPFILTER is not set
+# CONFIG_IP_NF_ARP_MANGLE is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+
#
# Device Drivers
#
# CONFIG_SCSI_QLA2300 is not set
# CONFIG_SCSI_QLA2322 is not set
# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_QLA24XX is not set
# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
#
# Fusion MPT device support
#
-# CONFIG_FUSION is not set
-# CONFIG_FUSION_SPI is not set
-# CONFIG_FUSION_FC is not set
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=y
+CONFIG_FUSION_FC=y
+CONFIG_FUSION_MAX_SGE=128
+CONFIG_FUSION_CTL=m
#
# IEEE 1394 (FireWire) support
# CONFIG_I2O is not set
#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_FIB_HASH=y
-# CONFIG_IP_PNP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_IP_MROUTE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_TUNNEL is not set
-# CONFIG_IP_TCPDIAG is not set
-# CONFIG_IP_TCPDIAG_IPV6 is not set
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_BIC=y
-
-#
-# IP: Virtual Server Configuration
-#
-# CONFIG_IP_VS is not set
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-
-#
-# IP: Netfilter Configuration
+# Network device support
#
-# CONFIG_IP_NF_CONNTRACK is not set
-# CONFIG_IP_NF_CONNTRACK_MARK is not set
-# CONFIG_IP_NF_QUEUE is not set
-# CONFIG_IP_NF_IPTABLES is not set
-CONFIG_IP_NF_ARPTABLES=y
-# CONFIG_IP_NF_ARPFILTER is not set
-# CONFIG_IP_NF_ARP_MANGLE is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
# CONFIG_BONDING is not set
# CONFIG_NET_FC is not set
# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
#
# ISDN subsystem
CONFIG_DRM_RADEON=y
# CONFIG_DRM_MGA is not set
# CONFIG_DRM_SIS is not set
+# CONFIG_DRM_VIA is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_HPET is not set
# CONFIG_HANGCHECK_TIMER is not set
# CONFIG_I2C_VIAPRO is not set
# CONFIG_I2C_VOODOO3 is not set
# CONFIG_I2C_PCA_ISA is not set
+# CONFIG_I2C_SENSOR is not set
#
-# Hardware Sensors Chip support
-#
-# CONFIG_I2C_SENSOR is not set
-# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ADM1025 is not set
-# CONFIG_SENSORS_ADM1026 is not set
-# CONFIG_SENSORS_ADM1031 is not set
-# CONFIG_SENSORS_ADM9240 is not set
-# CONFIG_SENSORS_ASB100 is not set
-# CONFIG_SENSORS_ATXP1 is not set
-# CONFIG_SENSORS_DS1621 is not set
-# CONFIG_SENSORS_FSCHER is not set
-# CONFIG_SENSORS_FSCPOS is not set
-# CONFIG_SENSORS_GL518SM is not set
-# CONFIG_SENSORS_GL520SM is not set
-# CONFIG_SENSORS_IT87 is not set
-# CONFIG_SENSORS_LM63 is not set
-# CONFIG_SENSORS_LM75 is not set
-# CONFIG_SENSORS_LM77 is not set
-# CONFIG_SENSORS_LM78 is not set
-# CONFIG_SENSORS_LM80 is not set
-# CONFIG_SENSORS_LM83 is not set
-# CONFIG_SENSORS_LM85 is not set
-# CONFIG_SENSORS_LM87 is not set
-# CONFIG_SENSORS_LM90 is not set
-# CONFIG_SENSORS_LM92 is not set
-# CONFIG_SENSORS_MAX1619 is not set
-# CONFIG_SENSORS_PC87360 is not set
-# CONFIG_SENSORS_SMSC47B397 is not set
-# CONFIG_SENSORS_SIS5595 is not set
-# CONFIG_SENSORS_SMSC47M1 is not set
-# CONFIG_SENSORS_VIA686A is not set
-# CONFIG_SENSORS_W83781D is not set
-# CONFIG_SENSORS_W83L785TS is not set
-# CONFIG_SENSORS_W83627HF is not set
-# CONFIG_SENSORS_W83627EHF is not set
-
-#
-# Other I2C Chip support
+# Miscellaneous I2C Chip support
#
# CONFIG_SENSORS_DS1337 is not set
# CONFIG_SENSORS_DS1374 is not set
#
# CONFIG_W1 is not set
+#
+# Hardware Monitoring support
+#
+# CONFIG_HWMON is not set
+
#
# Misc devices
#
#
# Video Adapters
#
-# CONFIG_TUNER_MULTI_I2C is not set
# CONFIG_VIDEO_BT848 is not set
# CONFIG_VIDEO_CPIA is not set
# CONFIG_VIDEO_SAA5246A is not set
# CONFIG_USB_EGALAX is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
#
# USB Imaging devices
# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_IDMOUSE is not set
# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
#
# USB DSL modem support
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
#
# XFS support
# CONFIG_XFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
+# CONFIG_INOTIFY is not set
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
CONFIG_AUTOFS_FS=y
.save rp, r0
.body
movl gp = __gp
- movl sp = stack_mem
+ movl sp = stack_mem+16384-16
bsw.1
br.call.sptk.many rp=start_bootloader
END(_start)
return ((struct sal_ret_values) {status, r9, r10, r11});
}
-
-/*
- * This is here to work around a bug in egcs-1.1.1b that causes the
- * compiler to crash (seems like a bug in the new alias analysis code.
- */
-void *
-id (long addr)
-{
- return (void *) addr;
-}
-
struct ia64_boot_param *
sys_fw_init (const char *args, int arglen)
{
#include <asm/uaccess.h>
#include <asm/rse.h>
#include <asm/sigcontext.h>
-#include <asm/segment.h>
#include "ia32priv.h"
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
+obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
--- /dev/null
+
+#
+# CPU Frequency scaling
+#
+
+menu "CPU Frequency scaling"
+
+source "drivers/cpufreq/Kconfig"
+
+if CPU_FREQ
+
+comment "CPUFreq processor drivers"
+
+config IA64_ACPI_CPUFREQ
+ tristate "ACPI Processor P-States driver"
+ select CPU_FREQ_TABLE
+ depends on ACPI_PROCESSOR
+ help
+ This driver adds a CPUFreq driver which utilizes the ACPI
+ Processor Performance States.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+endif # CPU_FREQ
+
+endmenu
+
--- /dev/null
+obj-$(CONFIG_IA64_ACPI_CPUFREQ) += acpi-cpufreq.o
--- /dev/null
+/*
+ * arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+ * This file provides the ACPI-based P-state support.  This
+ * module works with the generic cpufreq infrastructure.  Most of
+ * the code is based on the i386 version
+ * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c)
+ *
+ * Copyright (C) 2005 Intel Corp
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/pal.h>
+
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+
+#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
+
+MODULE_AUTHOR("Venkatesh Pallipadi");
+MODULE_DESCRIPTION("ACPI Processor P-States Driver");
+MODULE_LICENSE("GPL");
+
+
+struct cpufreq_acpi_io {
+ struct acpi_processor_performance acpi_data;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int resume;
+};
+
+static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
+
+static struct cpufreq_driver acpi_cpufreq_driver;
+
+
+static int
+processor_set_pstate (
+ u32 value)
+{
+ s64 retval;
+
+ dprintk("processor_set_pstate\n");
+
+ retval = ia64_pal_set_pstate((u64)value);
+
+ if (retval) {
+ dprintk("Failed to set freq to 0x%x, with error 0x%x\n",
+ value, retval);
+ return -ENODEV;
+ }
+ return (int)retval;
+}
+
+
+static int
+processor_get_pstate (
+ u32 *value)
+{
+ u64 pstate_index = 0;
+ s64 retval;
+
+ dprintk("processor_get_pstate\n");
+
+ retval = ia64_pal_get_pstate(&pstate_index);
+ *value = (u32) pstate_index;
+
+ if (retval)
+ dprintk("Failed to get current freq with "
+ "error 0x%x, idx 0x%x\n", retval, *value);
+
+ return (int)retval;
+}
+
+
+/* To be used only after data->acpi_data is initialized */
+static unsigned
+extract_clock (
+ struct cpufreq_acpi_io *data,
+ unsigned value,
+ unsigned int cpu)
+{
+ unsigned long i;
+
+ dprintk("extract_clock\n");
+
+ for (i = 0; i < data->acpi_data.state_count; i++) {
+ if (value >= data->acpi_data.states[i].control)
+ return data->acpi_data.states[i].core_frequency;
+ }
+ return data->acpi_data.states[i-1].core_frequency;
+}
+
+
+static unsigned int
+processor_get_freq (
+ struct cpufreq_acpi_io *data,
+ unsigned int cpu)
+{
+ int ret = 0;
+ u32 value = 0;
+ cpumask_t saved_mask;
+ unsigned long clock_freq;
+
+ dprintk("processor_get_freq\n");
+
+ saved_mask = current->cpus_allowed;
+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ if (smp_processor_id() != cpu) {
+ ret = -EAGAIN;
+ goto migrate_end;
+ }
+
+ /*
+ * processor_get_pstate gets the average frequency since the
+ * last get. So, do two PAL_get_freq()...
+ */
+ ret = processor_get_pstate(&value);
+ ret = processor_get_pstate(&value);
+
+ if (ret) {
+ set_cpus_allowed(current, saved_mask);
+ printk(KERN_WARNING "get performance failed with error %d\n",
+ ret);
+ ret = -EAGAIN;
+ goto migrate_end;
+ }
+ clock_freq = extract_clock(data, value, cpu);
+ ret = (clock_freq*1000);
+
+migrate_end:
+ set_cpus_allowed(current, saved_mask);
+ return ret;
+}
+
+
+static int
+processor_set_freq (
+ struct cpufreq_acpi_io *data,
+ unsigned int cpu,
+ int state)
+{
+ int ret = 0;
+ u32 value = 0;
+ struct cpufreq_freqs cpufreq_freqs;
+ cpumask_t saved_mask;
+ int retval;
+
+ dprintk("processor_set_freq\n");
+
+ saved_mask = current->cpus_allowed;
+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ if (smp_processor_id() != cpu) {
+ retval = -EAGAIN;
+ goto migrate_end;
+ }
+
+ if (state == data->acpi_data.state) {
+ if (unlikely(data->resume)) {
+ dprintk("Called after resume, resetting to P%d\n", state);
+ data->resume = 0;
+ } else {
+ dprintk("Already at target state (P%d)\n", state);
+ retval = 0;
+ goto migrate_end;
+ }
+ }
+
+ dprintk("Transitioning from P%d to P%d\n",
+ data->acpi_data.state, state);
+
+ /* cpufreq frequency struct */
+ cpufreq_freqs.cpu = cpu;
+ cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
+ cpufreq_freqs.new = data->freq_table[state].frequency;
+
+ /* notify cpufreq */
+ cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
+
+ /*
+ * First we write the target state's 'control' value to the
+ * control_register.
+ */
+
+ value = (u32) data->acpi_data.states[state].control;
+
+ dprintk("Transitioning to state: 0x%08x\n", value);
+
+ ret = processor_set_pstate(value);
+ if (ret) {
+ unsigned int tmp = cpufreq_freqs.new;
+ cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freqs.new = cpufreq_freqs.old;
+ cpufreq_freqs.old = tmp;
+ cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+ printk(KERN_WARNING "Transition failed with error %d\n", ret);
+ retval = -ENODEV;
+ goto migrate_end;
+ }
+
+ cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+
+ data->acpi_data.state = state;
+
+ retval = 0;
+
+migrate_end:
+ set_cpus_allowed(current, saved_mask);
+ return (retval);
+}
+
+
+static unsigned int
+acpi_cpufreq_get (
+ unsigned int cpu)
+{
+ struct cpufreq_acpi_io *data = acpi_io_data[cpu];
+
+ dprintk("acpi_cpufreq_get\n");
+
+ return processor_get_freq(data, cpu);
+}
+
+
+static int
+acpi_cpufreq_target (
+ struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+ unsigned int next_state = 0;
+ unsigned int result = 0;
+
+ dprintk("acpi_cpufreq_setpolicy\n");
+
+ result = cpufreq_frequency_table_target(policy,
+ data->freq_table, target_freq, relation, &next_state);
+ if (result)
+ return (result);
+
+ result = processor_set_freq(data, policy->cpu, next_state);
+
+ return (result);
+}
+
+
+static int
+acpi_cpufreq_verify (
+ struct cpufreq_policy *policy)
+{
+ unsigned int result = 0;
+ struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+
+ dprintk("acpi_cpufreq_verify\n");
+
+ result = cpufreq_frequency_table_verify(policy,
+ data->freq_table);
+
+ return (result);
+}
+
+
+/*
+ * processor_init_pdc - let BIOS know about the SMP capabilities
+ * of this driver
+ * @perf: processor-specific acpi_io_data struct
+ * @cpu: CPU being initialized
+ *
+ * To avoid issues with legacy OSes, some BIOSes require that they be
+ * informed of the SMP capabilities of the OS P-state driver.  Here we set
+ * the bits in _PDC accordingly.  The actual call to _PDC is made in
+ * drivers/acpi/processor.c
+ */
+static void
+processor_init_pdc (
+ struct acpi_processor_performance *perf,
+ unsigned int cpu,
+ struct acpi_object_list *obj_list
+ )
+{
+ union acpi_object *obj;
+ u32 *buf;
+
+ dprintk("processor_init_pdc\n");
+
+ perf->pdc = NULL;
+ /* Initialize pdc. It will be used later. */
+ if (!obj_list)
+ return;
+
+ if (!(obj_list->count && obj_list->pointer))
+ return;
+
+ obj = obj_list->pointer;
+ if ((obj->buffer.length == 12) && obj->buffer.pointer) {
+ buf = (u32 *)obj->buffer.pointer;
+ buf[0] = ACPI_PDC_REVISION_ID;
+ buf[1] = 1;
+ buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
+ perf->pdc = obj_list;
+ }
+ return;
+}
+
+
+static int
+acpi_cpufreq_cpu_init (
+ struct cpufreq_policy *policy)
+{
+ unsigned int i;
+ unsigned int cpu = policy->cpu;
+ struct cpufreq_acpi_io *data;
+ unsigned int result = 0;
+
+ union acpi_object arg0 = {ACPI_TYPE_BUFFER};
+ u32 arg0_buf[3];
+ struct acpi_object_list arg_list = {1, &arg0};
+
+ dprintk("acpi_cpufreq_cpu_init\n");
+ /* setup arg_list for _PDC settings */
+ arg0.buffer.length = 12;
+ arg0.buffer.pointer = (u8 *) arg0_buf;
+
+ data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
+ if (!data)
+ return (-ENOMEM);
+
+ memset(data, 0, sizeof(struct cpufreq_acpi_io));
+
+ acpi_io_data[cpu] = data;
+
+ processor_init_pdc(&data->acpi_data, cpu, &arg_list);
+ result = acpi_processor_register_performance(&data->acpi_data, cpu);
+ data->acpi_data.pdc = NULL;
+
+ if (result)
+ goto err_free;
+
+ /* capability check */
+ if (data->acpi_data.state_count <= 1) {
+ dprintk("No P-States\n");
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ if ((data->acpi_data.control_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+ (data->acpi_data.status_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ dprintk("Unsupported address space [%d, %d]\n",
+ (u32) (data->acpi_data.control_register.space_id),
+ (u32) (data->acpi_data.status_register.space_id));
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ /* alloc freq_table */
+ data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
+ (data->acpi_data.state_count + 1),
+ GFP_KERNEL);
+ if (!data->freq_table) {
+ result = -ENOMEM;
+ goto err_unreg;
+ }
+
+ /* detect transition latency */
+ policy->cpuinfo.transition_latency = 0;
+ for (i=0; i<data->acpi_data.state_count; i++) {
+ if ((data->acpi_data.states[i].transition_latency * 1000) >
+ policy->cpuinfo.transition_latency) {
+ policy->cpuinfo.transition_latency =
+ data->acpi_data.states[i].transition_latency * 1000;
+ }
+ }
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+
+ policy->cur = processor_get_freq(data, policy->cpu);
+
+ /* table init */
+ for (i = 0; i <= data->acpi_data.state_count; i++)
+ {
+ data->freq_table[i].index = i;
+ if (i < data->acpi_data.state_count) {
+ data->freq_table[i].frequency =
+ data->acpi_data.states[i].core_frequency * 1000;
+ } else {
+ data->freq_table[i].frequency = CPUFREQ_TABLE_END;
+ }
+ }
+
+ result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+ if (result) {
+ goto err_freqfree;
+ }
+
+ /* notify BIOS that we exist */
+ acpi_processor_notify_smm(THIS_MODULE);
+
+ printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
+ "activated.\n", cpu);
+
+ for (i = 0; i < data->acpi_data.state_count; i++)
+ dprintk(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
+ (i == data->acpi_data.state?'*':' '), i,
+ (u32) data->acpi_data.states[i].core_frequency,
+ (u32) data->acpi_data.states[i].power,
+ (u32) data->acpi_data.states[i].transition_latency,
+ (u32) data->acpi_data.states[i].bus_master_latency,
+ (u32) data->acpi_data.states[i].status,
+ (u32) data->acpi_data.states[i].control);
+
+ cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
+
+ /* the first call to ->target() should result in us actually
+ * writing something to the appropriate registers. */
+ data->resume = 1;
+
+ return (result);
+
+ err_freqfree:
+ kfree(data->freq_table);
+ err_unreg:
+ acpi_processor_unregister_performance(&data->acpi_data, cpu);
+ err_free:
+ kfree(data);
+ acpi_io_data[cpu] = NULL;
+
+ return (result);
+}
+
+
+static int
+acpi_cpufreq_cpu_exit (
+ struct cpufreq_policy *policy)
+{
+ struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+
+ dprintk("acpi_cpufreq_cpu_exit\n");
+
+ if (data) {
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ acpi_io_data[policy->cpu] = NULL;
+ acpi_processor_unregister_performance(&data->acpi_data,
+ policy->cpu);
+ kfree(data);
+ }
+
+ return (0);
+}
+
+
+static struct freq_attr* acpi_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+
+static struct cpufreq_driver acpi_cpufreq_driver = {
+ .verify = acpi_cpufreq_verify,
+ .target = acpi_cpufreq_target,
+ .get = acpi_cpufreq_get,
+ .init = acpi_cpufreq_cpu_init,
+ .exit = acpi_cpufreq_cpu_exit,
+ .name = "acpi-cpufreq",
+ .owner = THIS_MODULE,
+ .attr = acpi_cpufreq_attr,
+};
+
+
+static int __init
+acpi_cpufreq_init (void)
+{
+ dprintk("acpi_cpufreq_init\n");
+
+ return cpufreq_register_driver(&acpi_cpufreq_driver);
+}
+
+
+static void __exit
+acpi_cpufreq_exit (void)
+{
+ dprintk("acpi_cpufreq_exit\n");
+
+ cpufreq_unregister_driver(&acpi_cpufreq_driver);
+ return;
+}
+
+
+late_initcall(acpi_cpufreq_init);
+module_exit(acpi_cpufreq_exit);
+
#endif
/* Attach the domains */
- for_each_online_cpu(i) {
+ for_each_cpu_mask(i, *cpu_map) {
struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
sd = &per_cpu(cpu_domains, i);
DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
thread->pfm_context, ctx));
+ ret = -EBUSY;
old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
if (old != NULL) {
DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
static struct salinfo_data salinfo_data[ARRAY_SIZE(salinfo_log_name)];
-static spinlock_t data_lock, data_saved_lock;
+static DEFINE_SPINLOCK(data_lock);
+static DEFINE_SPINLOCK(data_saved_lock);
/** salinfo_platform_oemdata - optional callback to decode oemdata from an error
* record.
lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \
- bitop.o checksum.o clear_page.o csum_partial_copy.o copy_page.o \
+ bitop.o checksum.o clear_page.o csum_partial_copy.o \
clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
flush.o ip_fast_csum.o do_csum.o \
memset.o strlen.o swiotlb.o
setup_io_tlb_npages(char *str)
{
if (isdigit(*str)) {
- io_tlb_nslabs = simple_strtoul(str, &str, 0) <<
- (PAGE_SHIFT - IO_TLB_SHIFT);
+ io_tlb_nslabs = simple_strtoul(str, &str, 0);
/* avoid tail segment of size < IO_TLB_SEGSIZE */
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
unsigned long i;
if (!io_tlb_nslabs) {
- io_tlb_nslabs = (default_size >> PAGE_SHIFT);
+ io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
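
For reference on the two swiotlb hunks above: io_tlb_nslabs counts slabs of
1 << IO_TLB_SHIFT bytes, not pages, so the command-line value is now taken
directly as a slab count and the default pool size in bytes is converted
with IO_TLB_SHIFT.  For example, assuming the usual IO_TLB_SHIFT of 11
(2 KiB slabs) and a 64 MiB default pool (both values are assumptions, not
shown in these hunks), the default becomes (64 << 20) >> 11 = 32768 slabs;
shifting by PAGE_SHIFT instead would yield a pool smaller than intended by
a factor of 1 << (PAGE_SHIFT - IO_TLB_SHIFT).
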
#include <asm/machvec.h>
#include <asm/page.h>
-#include <asm/segment.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/sal.h>
res->start = region->start + offset;
res->end = region->end + offset;
}
+EXPORT_SYMBOL(pcibios_bus_to_resource);
static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
{
continue;
}
+ spin_lock_init(&sn_flush_device_list->sfdl_flush_lock);
hubdev->hdi_flush_nasid_list.widget_p[widget] =
sn_flush_device_list;
}
struct pci_controller *controller;
struct pcibus_bussoft *prom_bussoft_ptr;
struct hubdev_info *hubdev_info;
- void *provider_soft;
+ void *provider_soft = NULL;
struct sn_pcibus_provider *provider;
status = sal_get_pcibus_info((u64) segment, (u64) busnum,
if (bus == NULL) {
bus = pci_scan_bus(busnum, &pci_root_ops, controller);
if (bus == NULL)
- return; /* error, or bus already scanned */
+ goto error_return; /* error, or bus already scanned */
bus->sysdata = NULL;
}
*/
if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES)
- return; /* unsupported asic type */
+ goto error_return; /* unsupported asic type */
if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
goto error_return; /* no further fixup necessary */
provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
if (provider == NULL)
- return; /* no provider registerd for this asic */
+ goto error_return; /* no provider registered for this asic */
- provider_soft = NULL;
+ bus->sysdata = controller;
if (provider->bus_fixup)
provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller);
- if (provider_soft == NULL)
- return; /* fixup failed or not applicable */
+ if (provider_soft == NULL) {
+ /* fixup failed or not applicable */
+ bus->sysdata = NULL;
+ goto error_return;
+ }
/*
* Generic bus fixup goes here. Don't reference prom_bussoft_ptr
* after this point.
*/
- bus->sysdata = controller;
PCI_CONTROLLER(bus)->platform_data = provider_soft;
nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
cnode = nasid_to_cnodeid(nasid);
#include <asm/topology.h>
#include <asm/smp.h>
#include <asm/semaphore.h>
-#include <asm/segment.h>
#include <asm/uaccess.h>
#include <asm/sal.h>
#include <asm/sn/io.h>
# Common NUMA Features
config NUMA
bool "Numa Memory Allocation Support"
- depends on SMP
+ depends on SMP && BROKEN
default n
# turning this on wastes a bunch of space.
config PCI
bool "PCI support"
+ depends on BROKEN
default n
help
Find out whether you have a PCI motherboard. PCI is the name of a
config DEBUG_PAGEALLOC
bool "Page alloc debugging"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && BROKEN
help
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
typedef struct {
unsigned long icucr; /* ICU Control Register */
} icu_data_t;
+static icu_data_t icu_data[M32700UT_NUM_CPU_IRQ];
+#else
+icu_data_t icu_data[M32700UT_NUM_CPU_IRQ];
#endif /* CONFIG_SMP */
-static icu_data_t icu_data[M32700UT_NUM_CPU_IRQ];
static void disable_m32700ut_irq(unsigned int irq)
{
typedef struct {
unsigned long icucr; /* ICU Control Register */
} icu_data_t;
+static icu_data_t icu_data[OPSPUT_NUM_CPU_IRQ];
+#else
+icu_data_t icu_data[OPSPUT_NUM_CPU_IRQ];
#endif /* CONFIG_SMP */
-static icu_data_t icu_data[OPSPUT_NUM_CPU_IRQ];
static void disable_opsput_irq(unsigned int irq)
{
/* which physical ID maps to which logical CPU number */
static volatile int physid_2_cpu[NR_CPUS];
+#define physid_to_cpu(physid) physid_2_cpu[physid]
/* which logical CPU number maps to which physical ID */
volatile int cpu_2_physid[NR_CPUS];
return csum_partial(dst, len-missing, sum);
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(csum_partial);
#include <linux/mmzone.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>
+#include <linux/module.h>
#include <asm/setup.h>
extern char _end[];
struct pglist_data *node_data[MAX_NUMNODES];
+EXPORT_SYMBOL(node_data);
static bootmem_data_t node_bdata[MAX_NUMNODES] __initdata;
pg_data_t m32r_node_data[MAX_NUMNODES];
default y if PPC_PREP
config SMP
+ depends on PPC_STD_MMU
bool "Symmetric multi-processing support"
---help---
This enables support for systems with more than one CPU. If you have
config IRQ_ALL_CPUS
bool "Distribute interrupts on all CPUs by default"
- depends on SMP
+ depends on SMP && !MV64360
help
This option gives the kernel permission to distribute IRQs across
multiple CPUs. Saying N here will route all IRQs to the first
source "drivers/zorro/Kconfig"
+if !44x || BROKEN
source kernel/power/Kconfig
+endif
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on 40x || 44x
default y
+config WANT_EARLY_SERIAL
+ bool
+ select SERIAL_8250
+ default n
+
menu "IBM 4xx options"
depends on 4xx
config BUBINGA
bool "Bubinga"
+ select WANT_EARLY_SERIAL
help
This option enables support for the IBM 405EP evaluation board.
config BAMBOO
bool "Bamboo"
+ select WANT_EARLY_SERIAL
help
This option enables support for the IBM PPC440EP evaluation board.
config EBONY
bool "Ebony"
+ select WANT_EARLY_SERIAL
help
This option enables support for the IBM PPC440GP evaluation board.
config LUAN
bool "Luan"
+ select WANT_EARLY_SERIAL
help
This option enables support for the IBM PPC440SP evaluation board.
config OCOTEA
bool "Ocotea"
+ select WANT_EARLY_SERIAL
help
This option enables support for the IBM PPC440GX evaluation board.
depends on 4xx
default y
-config PM
- bool "Power Management support (EXPERIMENTAL)"
- depends on 4xx && EXPERIMENTAL
-
choice
prompt "TTYS0 device and default console"
depends on 40x
ppc_md.find_end_of_memory = m8xx_find_end_of_memory;
ppc_md.setup_io_mappings = m8xx_map_io;
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
+#if defined(CONFIG_BLK_DEV_MPC8xx_IDE)
m8xx_ide_init();
#endif
}
return DMA_STATUS_GOOD;
}
+#ifdef CONFIG_PPC4xx_EDMA
/*
* Enables the burst on the channel (BTEN bit in the control/count register)
* Note:
return DMA_STATUS_GOOD;
}
+EXPORT_SYMBOL(ppc4xx_enable_burst);
+EXPORT_SYMBOL(ppc4xx_disable_burst);
+EXPORT_SYMBOL(ppc4xx_set_burst_size);
+#endif /* CONFIG_PPC4xx_EDMA */
+
EXPORT_SYMBOL(ppc4xx_init_dma_channel);
EXPORT_SYMBOL(ppc4xx_get_channel_config);
EXPORT_SYMBOL(ppc4xx_set_channel_priority);
EXPORT_SYMBOL(ppc4xx_disable_dma_interrupt);
EXPORT_SYMBOL(ppc4xx_get_dma_status);
EXPORT_SYMBOL(ppc4xx_clr_dma_status);
-EXPORT_SYMBOL(ppc4xx_enable_burst);
-EXPORT_SYMBOL(ppc4xx_disable_burst);
-EXPORT_SYMBOL(ppc4xx_set_burst_size);
+
/* The HvReleaseData is the root of the information shared between
* the hypervisor and Linux.
*/
-
-/*
- * WARNING - magic here
- *
- * Ok, this is a horrid hack below, but marginally better than the
- * alternatives. What we really want is just to initialize
- * hvReleaseData in C as in the #if 0 section here. However, gcc
- * refuses to believe that (u32)&x is a constant expression, so will
- * not allow the xMsNucDataOffset field to be properly initialized.
- * So, we declare hvReleaseData in inline asm instead. We use inline
- * asm, rather than a .S file, because the assembler won't generate
- * the necessary relocation for the LparMap either, unless that symbol
- * is declared in the same source file. Finally, we put the asm in a
- * dummy, attribute-used function, instead of at file scope, because
- * file scope asms don't allow contraints. We want to use the "i"
- * constraints to put sizeof() and offsetof() expressions in there,
- * because including asm/offsets.h in C code then stringifying causes
- * all manner of warnings.
- */
-#if 0
struct HvReleaseData hvReleaseData = {
.xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */
.xSize = sizeof(struct HvReleaseData),
.xVpdAreasPtrOffset = offsetof(struct naca_struct, xItVpdAreas),
.xSlicNacaAddr = &naca, /* 64-bit Naca address */
- .xMsNucDataOffset = (u32)((unsigned long)&xLparMap - KERNELBASE),
+ .xMsNucDataOffset = LPARMAP_PHYS,
.xFlags = HVREL_TAGSINACTIVE /* tags inactive */
/* 64 bit */
/* shared processors */
0xa7, 0x40, 0xf2, 0x4b,
0xf4, 0x4b, 0xf6, 0xf4 },
};
-#endif
-
-
-extern struct HvReleaseData hvReleaseData;
-
-static void __attribute_used__ hvReleaseData_wrapper(void)
-{
- /* This doesn't appear to need any alignment (even 4 byte) */
- asm volatile (
- " lparMapPhys = xLparMap - %3\n"
- " .data\n"
- " .globl hvReleaseData\n"
- "hvReleaseData:\n"
- " .long 0xc8a5d9c4\n" /* xDesc */
- /* "HvRD" in ebcdic */
- " .short %0\n" /* xSize */
- " .short %1\n" /* xVpdAreasPtrOffset */
- " .llong naca\n" /* xSlicNacaAddr */
- " .long lparMapPhys\n" /* xMsNucDataOffset */
- " .long 0\n" /* xRsvd1 */
- " .short %2\n" /* xFlags */
- " .short 4\n" /* xVrmIndex - v5r2m0 */
- " .short 3\n" /* xMinSupportedPlicVrmIndex - v5r1m0 */
- " .short 3\n" /* xMinCompatablePlicVrmIndex - v5r1m0 */
- " .long 0xd38995a4\n" /* xVrmName */
- " .long 0xa740f24b\n" /* "Linux 2.4.64" ebcdic */
- " .long 0xf44bf6f4\n"
- " . = hvReleaseData + %0\n"
- " .previous\n"
- : : "i"(sizeof(hvReleaseData)),
- "i"(offsetof(struct naca_struct, xItVpdAreas)),
- "i"(HVREL_TAGSINACTIVE /* tags inactive, 64 bit, */
- /* shared processors, HMT allowed */
- | 6), /* TEMP: This allows non-GA drivers */
- "i"(KERNELBASE)
- );
-}
-
-struct LparMap __attribute__((aligned (16))) xLparMap = {
- .xNumberEsids = HvEsidsToMap,
- .xNumberRanges = HvRangesToMap,
- .xSegmentTableOffs = STAB0_PAGE,
-
- .xEsids = {
- { .xKernelEsid = GET_ESID(KERNELBASE),
- .xKernelVsid = KERNEL_VSID(KERNELBASE), },
- { .xKernelEsid = GET_ESID(VMALLOCBASE),
- .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
- },
-
- .xRanges = {
- { .xPages = HvPagesToMap,
- .xOffset = 0,
- .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT),
- },
- },
-};
extern void system_reset_iSeries(void);
extern void machine_check_iSeries(void);
obj-$(CONFIG_KPROBES) += kprobes.o
CFLAGS_ioctl32.o += -Ifs/
+
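+# On iSeries, head.S includes lparmap.s, so head.o must depend on it and
+# have arch/ppc64/kernel on the assembler include path.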
+ifeq ($(CONFIG_PPC_ISERIES),y)
+arch/ppc64/kernel/head.o: arch/ppc64/kernel/lparmap.s
+AFLAGS_head.o += -Iarch/ppc64/kernel
+endif
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
+#include <asm/iSeries/LparMap.h>
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
.globl fwnmi_data_area
fwnmi_data_area:
+#ifdef CONFIG_PPC_ISERIES
+ . = LPARMAP_PHYS
+#include "lparmap.s"
+#endif /* CONFIG_PPC_ISERIES */
+
/*
* Vectors for the FWNMI option. Share common code.
*/
dma_addr_t dma_next = 0, dma_addr;
unsigned long flags;
struct scatterlist *s, *outs, *segstart;
- int outcount;
+ int outcount, incount;
unsigned long handle;
BUG_ON(direction == DMA_NONE);
outs = s = segstart = &sglist[0];
outcount = 1;
+ incount = nelems;
handle = 0;
/* Init first segment length for backout at failure */
DBG("mapped %d elements:\n", outcount);
- /* For the sake of iommu_free_sg, we clear out the length in the
+ /* For the sake of iommu_unmap_sg, we clear out the length in the
* next entry of the sglist if we didn't fill the list completely
*/
- if (outcount < nelems) {
+ if (outcount < incount) {
outs++;
outs->dma_address = DMA_ERROR_CODE;
outs->dma_length = 0;
--- /dev/null
+/*
+ * Copyright (C) 2005 Stephen Rothwell IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/iSeries/LparMap.h>
+
+const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
+ .xNumberEsids = HvEsidsToMap,
+ .xNumberRanges = HvRangesToMap,
+ .xSegmentTableOffs = STAB0_PAGE,
+
+ .xEsids = {
+ { .xKernelEsid = GET_ESID(KERNELBASE),
+ .xKernelVsid = KERNEL_VSID(KERNELBASE), },
+ { .xKernelEsid = GET_ESID(VMALLOCBASE),
+ .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
+ },
+
+ .xRanges = {
+ { .xPages = HvPagesToMap,
+ .xOffset = 0,
+ .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT),
+ },
+ },
+};
local_irq_disable();
while (1) ;
}
+/* Used by the G5 thermal driver */
+EXPORT_SYMBOL_GPL(machine_power_off);
void machine_halt(void)
{
"lra 3,0(%4)\n"
"lr 5,%5\n"
"diag 2,4,0x8\n"
- "brc 8, .Litfits\n"
+ "brc 8, 1f\n"
"ar 5, %5\n"
- ".Litfits: \n"
+ "1: \n"
"lr %0,4\n"
"lr %1,5\n"
: "=d" (return_code), "=d" (return_len)
"sam31\n"
"diag 2,4,0x8\n"
"sam64\n"
- "brc 8, .Litfits\n"
+ "brc 8, 1f\n"
"agr 5, %5\n"
- ".Litfits: \n"
+ "1: \n"
"lgr %0,4\n"
"lgr %1,5\n"
: "=d" (return_code), "=d" (return_len)
.long sys_add_key /* 285 */
.long sys_request_key
.long sys_keyctl
+ .long sys_ioprio_set
+ .long sys_ioprio_get
+ .long sys_inotify_init /* 290 */
+ .long sys_inotify_add_watch
+ .long sys_inotify_rm_watch
/* End of entry.S */
.long sys_add_key
.long sys_request_key
.long sys_keyctl /* 315 */
+ .long sys_ioprio_set
+ .long sys_ioprio_get
+ .long sys_inotify_init
+ .long sys_inotify_add_watch
+ .long sys_inotify_rm_watch /* 320 */
* The module references will be fixed up by module_frob_arch_sections.
*/
#define DOT_ALIAS2(__ret, __x, __arg1, __arg2) \
- extern __ret __x(__arg1, __arg2) \
- __attribute__((weak, alias("." # __x)));
+ extern __ret __x(__arg1, __arg2); \
+ asm(".weak " #__x);\
+ asm(#__x "=." #__x);
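+/* e.g. DOT_ALIAS2(int, div, int, int) makes "div" a weak alias of ".div" */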
DOT_ALIAS2(int, div, int, int)
DOT_ALIAS2(int, mul, int, int)
extra-y := head.o init_task.o vmlinux.lds
obj-y := process.o setup.o cpu.o idprom.o \
- traps.o devices.o auxio.o \
+ traps.o devices.o auxio.o una_asm.o \
irq.o ptrace.o time.o sys_sparc.o signal.o \
unaligned.o central.o pci.o starfire.o semaphore.o \
power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o
pbm->parent->resource_adjust(pdev, res, root);
}
+EXPORT_SYMBOL(pcibios_bus_to_resource);
char * __init pcibios_setup(char *str)
{
TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
+ TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
+ TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
+ TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
TI_FPREGS != offsetof(struct thread_info, fpregs) ||
(TI_FPREGS & (64 - 1)))
thread_info_offsets_are_bolixed_dave();
--- /dev/null
+/* una_asm.S: Kernel unaligned trap assembler helpers.
+ *
+ * Copyright (C) 1996,2005 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+ .text
+
+kernel_unaligned_trap_fault:
+ call kernel_mna_trap_fault
+ nop
+ retl
+ nop
+ .size kernel_unaligned_trap_fault, .-kernel_unaligned_trap_fault
+
+ .globl __do_int_store
+__do_int_store:
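+ /* %o0 = dst_addr, %o1 = size, %o2 = src_val (pointer), %o3 = asi */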
+ rd %asi, %o4
+ wr %o3, 0, %asi
+ ldx [%o2], %g3
+ cmp %o1, 2
+ be,pn %icc, 2f
+ cmp %o1, 4
+ be,pt %icc, 1f
+ srlx %g3, 24, %g2
+ srlx %g3, 56, %g1
+ srlx %g3, 48, %g7
+4: stba %g1, [%o0] %asi
+ srlx %g3, 40, %g1
+5: stba %g7, [%o0 + 1] %asi
+ srlx %g3, 32, %g7
+6: stba %g1, [%o0 + 2] %asi
+7: stba %g7, [%o0 + 3] %asi
+ srlx %g3, 16, %g1
+8: stba %g2, [%o0 + 4] %asi
+ srlx %g3, 8, %g7
+9: stba %g1, [%o0 + 5] %asi
+10: stba %g7, [%o0 + 6] %asi
+ ba,pt %xcc, 0f
+11: stba %g3, [%o0 + 7] %asi
+1: srl %g3, 16, %g7
+12: stba %g2, [%o0] %asi
+ srl %g3, 8, %g2
+13: stba %g7, [%o0 + 1] %asi
+14: stba %g2, [%o0 + 2] %asi
+ ba,pt %xcc, 0f
+15: stba %g3, [%o0 + 3] %asi
+2: srl %g3, 8, %g2
+16: stba %g2, [%o0] %asi
+17: stba %g3, [%o0 + 1] %asi
+0:
+ wr %o4, 0x0, %asi
+ retl
+ nop
+ .size __do_int_store, .-__do_int_store
+
+ .section __ex_table
+ .word 4b, kernel_unaligned_trap_fault
+ .word 5b, kernel_unaligned_trap_fault
+ .word 6b, kernel_unaligned_trap_fault
+ .word 7b, kernel_unaligned_trap_fault
+ .word 8b, kernel_unaligned_trap_fault
+ .word 9b, kernel_unaligned_trap_fault
+ .word 10b, kernel_unaligned_trap_fault
+ .word 11b, kernel_unaligned_trap_fault
+ .word 12b, kernel_unaligned_trap_fault
+ .word 13b, kernel_unaligned_trap_fault
+ .word 14b, kernel_unaligned_trap_fault
+ .word 15b, kernel_unaligned_trap_fault
+ .word 16b, kernel_unaligned_trap_fault
+ .word 17b, kernel_unaligned_trap_fault
+ .previous
+
+ .globl do_int_load
+do_int_load:
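+ /* %o0 = dest_reg, %o1 = size, %o2 = saddr, %o3 = is_signed, %o4 = asi */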
+ rd %asi, %o5
+ wr %o4, 0, %asi
+ cmp %o1, 8
+ bge,pn %icc, 9f
+ cmp %o1, 4
+ be,pt %icc, 6f
+4: lduba [%o2] %asi, %g2
+5: lduba [%o2 + 1] %asi, %g3
+ sll %g2, 8, %g2
+ brz,pt %o3, 3f
+ add %g2, %g3, %g2
+ sllx %g2, 48, %g2
+ srax %g2, 48, %g2
+3: ba,pt %xcc, 0f
+ stx %g2, [%o0]
+6: lduba [%o2 + 1] %asi, %g3
+ sll %g2, 24, %g2
+7: lduba [%o2 + 2] %asi, %g7
+ sll %g3, 16, %g3
+8: lduba [%o2 + 3] %asi, %g1
+ sll %g7, 8, %g7
+ or %g2, %g3, %g2
+ or %g7, %g1, %g7
+ or %g2, %g7, %g2
+ brnz,a,pt %o3, 3f
+ sra %g2, 0, %g2
+3: ba,pt %xcc, 0f
+ stx %g2, [%o0]
+9: lduba [%o2] %asi, %g2
+10: lduba [%o2 + 1] %asi, %g3
+ sllx %g2, 56, %g2
+11: lduba [%o2 + 2] %asi, %g7
+ sllx %g3, 48, %g3
+12: lduba [%o2 + 3] %asi, %g1
+ sllx %g7, 40, %g7
+ sllx %g1, 32, %g1
+ or %g2, %g3, %g2
+ or %g7, %g1, %g7
+13: lduba [%o2 + 4] %asi, %g3
+ or %g2, %g7, %g7
+14: lduba [%o2 + 5] %asi, %g1
+ sllx %g3, 24, %g3
+15: lduba [%o2 + 6] %asi, %g2
+ sllx %g1, 16, %g1
+ or %g7, %g3, %g7
+16: lduba [%o2 + 7] %asi, %g3
+ sllx %g2, 8, %g2
+ or %g7, %g1, %g7
+ or %g2, %g3, %g2
+ or %g7, %g2, %g7
+ cmp %o1, 8
+ be,a,pt %icc, 0f
+ stx %g7, [%o0]
+ srlx %g7, 32, %g2
+ sra %g7, 0, %g7
+ stx %g2, [%o0]
+ stx %g7, [%o0 + 8]
+0:
+ wr %o5, 0x0, %asi
+ retl
+ nop
+ .size do_int_load, .-do_int_load
+
+ .section __ex_table
+ .word 4b, kernel_unaligned_trap_fault
+ .word 5b, kernel_unaligned_trap_fault
+ .word 6b, kernel_unaligned_trap_fault
+ .word 7b, kernel_unaligned_trap_fault
+ .word 8b, kernel_unaligned_trap_fault
+ .word 9b, kernel_unaligned_trap_fault
+ .word 10b, kernel_unaligned_trap_fault
+ .word 11b, kernel_unaligned_trap_fault
+ .word 12b, kernel_unaligned_trap_fault
+ .word 13b, kernel_unaligned_trap_fault
+ .word 14b, kernel_unaligned_trap_fault
+ .word 15b, kernel_unaligned_trap_fault
+ .word 16b, kernel_unaligned_trap_fault
+ .previous
die_if_kernel(str, regs);
}
-#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({ \
-__asm__ __volatile__ ( \
- "wr %4, 0, %%asi\n\t" \
- "cmp %1, 8\n\t" \
- "bge,pn %%icc, 9f\n\t" \
- " cmp %1, 4\n\t" \
- "be,pt %%icc, 6f\n" \
-"4:\t" " lduba [%2] %%asi, %%l1\n" \
-"5:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
- "sll %%l1, 8, %%l1\n\t" \
- "brz,pt %3, 3f\n\t" \
- " add %%l1, %%l2, %%l1\n\t" \
- "sllx %%l1, 48, %%l1\n\t" \
- "srax %%l1, 48, %%l1\n" \
-"3:\t" "ba,pt %%xcc, 0f\n\t" \
- " stx %%l1, [%0]\n" \
-"6:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
- "sll %%l1, 24, %%l1\n" \
-"7:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \
- "sll %%l2, 16, %%l2\n" \
-"8:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \
- "sll %%g7, 8, %%g7\n\t" \
- "or %%l1, %%l2, %%l1\n\t" \
- "or %%g7, %%g1, %%g7\n\t" \
- "or %%l1, %%g7, %%l1\n\t" \
- "brnz,a,pt %3, 3f\n\t" \
- " sra %%l1, 0, %%l1\n" \
-"3:\t" "ba,pt %%xcc, 0f\n\t" \
- " stx %%l1, [%0]\n" \
-"9:\t" "lduba [%2] %%asi, %%l1\n" \
-"10:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
- "sllx %%l1, 56, %%l1\n" \
-"11:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \
- "sllx %%l2, 48, %%l2\n" \
-"12:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \
- "sllx %%g7, 40, %%g7\n\t" \
- "sllx %%g1, 32, %%g1\n\t" \
- "or %%l1, %%l2, %%l1\n\t" \
- "or %%g7, %%g1, %%g7\n" \
-"13:\t" "lduba [%2 + 4] %%asi, %%l2\n\t" \
- "or %%l1, %%g7, %%g7\n" \
-"14:\t" "lduba [%2 + 5] %%asi, %%g1\n\t" \
- "sllx %%l2, 24, %%l2\n" \
-"15:\t" "lduba [%2 + 6] %%asi, %%l1\n\t" \
- "sllx %%g1, 16, %%g1\n\t" \
- "or %%g7, %%l2, %%g7\n" \
-"16:\t" "lduba [%2 + 7] %%asi, %%l2\n\t" \
- "sllx %%l1, 8, %%l1\n\t" \
- "or %%g7, %%g1, %%g7\n\t" \
- "or %%l1, %%l2, %%l1\n\t" \
- "or %%g7, %%l1, %%g7\n\t" \
- "cmp %1, 8\n\t" \
- "be,a,pt %%icc, 0f\n\t" \
- " stx %%g7, [%0]\n\t" \
- "srlx %%g7, 32, %%l1\n\t" \
- "sra %%g7, 0, %%g7\n\t" \
- "stx %%l1, [%0]\n\t" \
- "stx %%g7, [%0 + 8]\n" \
-"0:\n\t" \
- "wr %%g0, %5, %%asi\n\n\t" \
- ".section __ex_table\n\t" \
- ".word 4b, " #errh "\n\t" \
- ".word 5b, " #errh "\n\t" \
- ".word 6b, " #errh "\n\t" \
- ".word 7b, " #errh "\n\t" \
- ".word 8b, " #errh "\n\t" \
- ".word 9b, " #errh "\n\t" \
- ".word 10b, " #errh "\n\t" \
- ".word 11b, " #errh "\n\t" \
- ".word 12b, " #errh "\n\t" \
- ".word 13b, " #errh "\n\t" \
- ".word 14b, " #errh "\n\t" \
- ".word 15b, " #errh "\n\t" \
- ".word 16b, " #errh "\n\n\t" \
- ".previous\n\t" \
- : : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed), \
- "r" (asi), "i" (ASI_AIUS) \
- : "l1", "l2", "g7", "g1", "cc"); \
-})
+extern void do_int_load(unsigned long *dest_reg, int size,
+ unsigned long *saddr, int is_signed, int asi);
-#define store_common(dst_addr, size, src_val, asi, errh) ({ \
-__asm__ __volatile__ ( \
- "wr %3, 0, %%asi\n\t" \
- "ldx [%2], %%l1\n" \
- "cmp %1, 2\n\t" \
- "be,pn %%icc, 2f\n\t" \
- " cmp %1, 4\n\t" \
- "be,pt %%icc, 1f\n\t" \
- " srlx %%l1, 24, %%l2\n\t" \
- "srlx %%l1, 56, %%g1\n\t" \
- "srlx %%l1, 48, %%g7\n" \
-"4:\t" "stba %%g1, [%0] %%asi\n\t" \
- "srlx %%l1, 40, %%g1\n" \
-"5:\t" "stba %%g7, [%0 + 1] %%asi\n\t" \
- "srlx %%l1, 32, %%g7\n" \
-"6:\t" "stba %%g1, [%0 + 2] %%asi\n" \
-"7:\t" "stba %%g7, [%0 + 3] %%asi\n\t" \
- "srlx %%l1, 16, %%g1\n" \
-"8:\t" "stba %%l2, [%0 + 4] %%asi\n\t" \
- "srlx %%l1, 8, %%g7\n" \
-"9:\t" "stba %%g1, [%0 + 5] %%asi\n" \
-"10:\t" "stba %%g7, [%0 + 6] %%asi\n\t" \
- "ba,pt %%xcc, 0f\n" \
-"11:\t" " stba %%l1, [%0 + 7] %%asi\n" \
-"1:\t" "srl %%l1, 16, %%g7\n" \
-"12:\t" "stba %%l2, [%0] %%asi\n\t" \
- "srl %%l1, 8, %%l2\n" \
-"13:\t" "stba %%g7, [%0 + 1] %%asi\n" \
-"14:\t" "stba %%l2, [%0 + 2] %%asi\n\t" \
- "ba,pt %%xcc, 0f\n" \
-"15:\t" " stba %%l1, [%0 + 3] %%asi\n" \
-"2:\t" "srl %%l1, 8, %%l2\n" \
-"16:\t" "stba %%l2, [%0] %%asi\n" \
-"17:\t" "stba %%l1, [%0 + 1] %%asi\n" \
-"0:\n\t" \
- "wr %%g0, %4, %%asi\n\n\t" \
- ".section __ex_table\n\t" \
- ".word 4b, " #errh "\n\t" \
- ".word 5b, " #errh "\n\t" \
- ".word 6b, " #errh "\n\t" \
- ".word 7b, " #errh "\n\t" \
- ".word 8b, " #errh "\n\t" \
- ".word 9b, " #errh "\n\t" \
- ".word 10b, " #errh "\n\t" \
- ".word 11b, " #errh "\n\t" \
- ".word 12b, " #errh "\n\t" \
- ".word 13b, " #errh "\n\t" \
- ".word 14b, " #errh "\n\t" \
- ".word 15b, " #errh "\n\t" \
- ".word 16b, " #errh "\n\t" \
- ".word 17b, " #errh "\n\n\t" \
- ".previous\n\t" \
- : : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi), "i" (ASI_AIUS)\
- : "l1", "l2", "g7", "g1", "cc"); \
-})
-
-#define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({ \
- unsigned long zero = 0; \
- unsigned long *src_val = &zero; \
- \
- if (size == 16) { \
- size = 8; \
- zero = (((long)(reg_num ? \
- (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | \
- (unsigned)fetch_reg(reg_num + 1, regs); \
- } else if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \
- store_common(dst_addr, size, src_val, asi, errh); \
-})
-
-extern void smp_capture(void);
-extern void smp_release(void);
-
-#define do_atomic(srcdest_reg, mem, errh) ({ \
- unsigned long flags, tmp; \
- \
- smp_capture(); \
- local_irq_save(flags); \
- tmp = *srcdest_reg; \
- do_integer_load(srcdest_reg, 4, mem, 0, errh); \
- store_common(mem, 4, &tmp, errh); \
- local_irq_restore(flags); \
- smp_release(); \
-})
+extern void __do_int_store(unsigned long *dst_addr, int size,
+ unsigned long *src_val, int asi);
+
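+/*
+ * Wrapper for __do_int_store: a 16-byte access is performed as an
+ * 8-byte store of the value packed from the 32-bit register pair
+ * (reg_num, reg_num + 1).
+ */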
+static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
+ struct pt_regs *regs, int asi)
+{
+ unsigned long zero = 0;
+ unsigned long *src_val = &zero;
+
+ if (size == 16) {
+ size = 8;
+ zero = (((long)(reg_num ?
+ (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
+ (unsigned)fetch_reg(reg_num + 1, regs);
+ } else if (reg_num) {
+ src_val = fetch_reg_addr(reg_num, regs);
+ }
+ __do_int_store(dst_addr, size, src_val, asi);
+}
static inline void advance(struct pt_regs *regs)
{
return !floating_point_load_or_store_p(insn);
}
-void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");
-
-void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
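+/*
+ * The trapping regs and instruction are stashed in thread_info by the
+ * unaligned trap handler before any access helper runs, so this fault
+ * path needs no arguments.
+ */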
+void kernel_mna_trap_fault(void)
{
- unsigned long g2 = regs->u_regs [UREG_G2];
+ struct pt_regs *regs = current_thread_info()->kern_una_regs;
+ unsigned int insn = current_thread_info()->kern_una_insn;
+ unsigned long g2 = regs->u_regs[UREG_G2];
unsigned long fixup = search_extables_range(regs->tpc, &g2);
if (!fixup) {
- unsigned long address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
+ unsigned long address;
+
+ address = compute_effective_address(regs, insn,
+ ((insn >> 25) & 0x1f));
if (address < PAGE_SIZE) {
- printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
+ printk(KERN_ALERT "Unable to handle kernel NULL "
+ "pointer dereference in mna handler");
} else
- printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
+ printk(KERN_ALERT "Unable to handle kernel paging "
+ "request in mna handler");
printk(KERN_ALERT " at virtual address %016lx\n",address);
- printk(KERN_ALERT "current->{mm,active_mm}->context = %016lx\n",
+ printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
(current->mm ? CTX_HWBITS(current->mm->context) :
CTX_HWBITS(current->active_mm->context)));
- printk(KERN_ALERT "current->{mm,active_mm}->pgd = %016lx\n",
+ printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
(current->mm ? (unsigned long) current->mm->pgd :
(unsigned long) current->active_mm->pgd));
die_if_kernel("Oops", regs);
enum direction dir = decode_direction(insn);
int size = decode_access_size(insn);
+ current_thread_info()->kern_una_regs = regs;
+ current_thread_info()->kern_una_insn = insn;
+
if (!ok_for_kernel(insn) || dir == both) {
- printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
- regs->tpc);
- unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs);
-
- __asm__ __volatile__ ("\n"
-"kernel_unaligned_trap_fault:\n\t"
- "mov %0, %%o0\n\t"
- "call kernel_mna_trap_fault\n\t"
- " mov %1, %%o1\n\t"
- :
- : "r" (regs), "r" (insn)
- : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
- "g1", "g2", "g3", "g4", "g7", "cc");
+ printk("Unsupported unaligned load/store trap for kernel "
+ "at <%016lx>.\n", regs->tpc);
+ unaligned_panic("Kernel does fpu/atomic "
+ "unaligned load/store.", regs);
+
+ kernel_mna_trap_fault();
} else {
- unsigned long addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
+ unsigned long addr;
+ addr = compute_effective_address(regs, insn,
+ ((insn >> 25) & 0x1f));
#ifdef DEBUG_MNA
- printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n",
- regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
+ printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] "
+ "retpc[%016lx]\n",
+ regs->tpc, dirstrings[dir], addr, size,
+ regs->u_regs[UREG_RETPC]);
#endif
switch (dir) {
case load:
- do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
- size, (unsigned long *) addr,
- decode_signedness(insn), decode_asi(insn, regs),
- kernel_unaligned_trap_fault);
+ do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
+ size, (unsigned long *) addr,
+ decode_signedness(insn),
+ decode_asi(insn, regs));
break;
case store:
- do_integer_store(((insn>>25)&0x1f), size,
- (unsigned long *) addr, regs,
- decode_asi(insn, regs),
- kernel_unaligned_trap_fault);
- break;
-#if 0 /* unsupported */
- case both:
- do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
- (unsigned long *) addr,
- kernel_unaligned_trap_fault);
+ do_int_store(((insn>>25)&0x1f), size,
+ (unsigned long *) addr, regs,
+ decode_asi(insn, regs));
break;
-#endif
+
default:
panic("Impossible kernel unaligned trap.");
/* Not reached... */
{
unsigned long old_refr_count, refr_count, mctrl;
-
refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
return ret;
}
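+/* cpufreq ->get() hook: read the current E-Star divisor on the target
+ * cpu and return the resulting frequency in kHz.
+ */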
+static unsigned int us2e_freq_get(unsigned int cpu)
+{
+ cpumask_t cpus_allowed;
+ unsigned long clock_tick, estar;
+
+ if (!cpu_online(cpu))
+ return 0;
+
+ cpus_allowed = current->cpus_allowed;
+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
+
+ clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+
+ set_cpus_allowed(current, cpus_allowed);
+
+ return clock_tick / estar_to_divisor(estar);
+}
+
static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
{
unsigned long new_bits, new_freq;
cpus_allowed = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(cpu));
- new_freq = clock_tick = sparc64_get_clock_tick(cpu);
+ new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
new_bits = index_to_estar_mode(index);
divisor = index_to_divisor(index);
new_freq /= divisor;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
if (old_divisor != divisor)
- us2e_transition(estar, new_bits, clock_tick, old_divisor, divisor);
+ us2e_transition(estar, new_bits, clock_tick * 1000,
+ old_divisor, divisor);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
unsigned int new_index = 0;
if (cpufreq_frequency_table_target(policy,
- &us2e_freq_table[policy->cpu].table[0],
- target_freq,
- relation,
- &new_index))
+ &us2e_freq_table[policy->cpu].table[0],
+ target_freq, relation, &new_index))
return -EINVAL;
us2e_set_cpu_divider_index(policy->cpu, new_index);
static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
- unsigned long clock_tick = sparc64_get_clock_tick(cpu);
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
struct cpufreq_frequency_table *table =
&us2e_freq_table[cpu].table[0];
memset(us2e_freq_table, 0,
(NR_CPUS * sizeof(struct us2e_freq_percpu_info)));
+ driver->init = us2e_freq_cpu_init;
driver->verify = us2e_freq_verify;
driver->target = us2e_freq_target;
- driver->init = us2e_freq_cpu_init;
+ driver->get = us2e_freq_get;
driver->exit = us2e_freq_cpu_exit;
driver->owner = THIS_MODULE,
strcpy(driver->name, "UltraSPARC-IIe");
static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
{
- unsigned long clock_tick = sparc64_get_clock_tick(cpu);
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
unsigned long ret;
switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
return ret;
}
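+/* cpufreq ->get() hook: read the Safari config register on the target
+ * cpu and return the current frequency in kHz.
+ */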
+static unsigned int us3_freq_get(unsigned int cpu)
+{
+ cpumask_t cpus_allowed;
+ unsigned long reg;
+ unsigned int ret;
+
+ if (!cpu_online(cpu))
+ return 0;
+
+ cpus_allowed = current->cpus_allowed;
+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
+
+ reg = read_safari_cfg();
+ ret = get_current_freq(cpu, reg);
+
+ set_cpus_allowed(current, cpus_allowed);
+
+ return ret;
+}
+
static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
{
unsigned long new_bits, new_freq, reg;
cpus_allowed = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(cpu));
- new_freq = sparc64_get_clock_tick(cpu);
+ new_freq = sparc64_get_clock_tick(cpu) / 1000;
switch (index) {
case 0:
new_bits = SAFARI_CFG_DIV_1;
static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
- unsigned long clock_tick = sparc64_get_clock_tick(cpu);
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
struct cpufreq_frequency_table *table =
&us3_freq_table[cpu].table[0];
memset(us3_freq_table, 0,
(NR_CPUS * sizeof(struct us3_freq_percpu_info)));
+ driver->init = us3_freq_cpu_init;
driver->verify = us3_freq_verify;
driver->target = us3_freq_target;
- driver->init = us3_freq_cpu_init;
+ driver->get = us3_freq_get;
driver->exit = us3_freq_cpu_exit;
driver->owner = THIS_MODULE,
strcpy(driver->name, "UltraSPARC-III");
*
*/
-#include <linux/types.h>
-#include <linux/kdev_t.h>
-#include <linux/time.h>
-#include <linux/devfs_fs_kernel.h>
+#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/smp_lock.h>
#include <linux/miscdevice.h>
#include <asm/uaccess.h>
-#include <asm/irq.h>
-#include <asm/pgtable.h>
#include "mem_user.h"
#include "user_util.h"
static char *v_buf = NULL;
static ssize_t
-mmapper_read(struct file *file, char *buf, size_t count, loff_t *ppos)
+mmapper_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
- if(*ppos > mmapper_size)
- return -EINVAL;
-
- if(count + *ppos > mmapper_size)
- count = count + *ppos - mmapper_size;
-
- if(count < 0)
- return -EINVAL;
-
- copy_to_user(buf,&v_buf[*ppos],count);
-
- return count;
+ return simple_read_from_buffer(buf, count, ppos, v_buf, mmapper_size);
}
static ssize_t
-mmapper_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
+mmapper_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- if(*ppos > mmapper_size)
+ if (*ppos > mmapper_size)
return -EINVAL;
- if(count + *ppos > mmapper_size)
- count = count + *ppos - mmapper_size;
-
- if(count < 0)
- return -EINVAL;
+ if (count > mmapper_size - *ppos)
+ count = mmapper_size - *ppos;
- copy_from_user(&v_buf[*ppos],buf,count);
+ if (copy_from_user(&v_buf[*ppos], buf, count))
+ return -EFAULT;
return count;
}
int ret = -EINVAL;
int size;
- lock_kernel();
if (vma->vm_pgoff != 0)
goto out;
goto out;
ret = 0;
out:
- unlock_kernel();
return ret;
}
CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
} while((n >= 0) && WIFSTOPPED(status) &&
- (WSTOPSIG(status) == SIGVTALRM));
+ ((WSTOPSIG(status) == SIGVTALRM) ||
+ /* running UML inside a detached screen can cause
+ * SIGWINCHes
+ */
+ (WSTOPSIG(status) == SIGWINCH)));
if((n < 0) || !WIFSTOPPED(status) ||
(WSTOPSIG(status) != SIGUSR1 && WSTOPSIG(status) != SIGTRAP)){
*/
#include <elf.h>
#include <stddef.h>
-#include <asm/elf.h>
#include "init.h"
#include "elf_user.h"
#include "mem_user.h"
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.13-rc3
-# Fri Jul 22 16:47:31 2005
+# Linux kernel version: 2.6.13-rc6-git3
+# Fri Aug 12 16:40:34 2005
#
CONFIG_X86_64=y
CONFIG_64BIT=y
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-CONFIG_NETPOLL=y
-# CONFIG_NETPOLL_RX is not set
-# CONFIG_NETPOLL_TRAP is not set
-CONFIG_NET_POLL_CONTROLLER=y
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
CONFIG_SCSI_SATA=y
+# CONFIG_SCSI_SATA_AHCI is not set
# CONFIG_SCSI_SATA_SVW is not set
CONFIG_SCSI_ATA_PIIX=y
# CONFIG_SCSI_SATA_NV is not set
# CONFIG_SCSI_QLA2300 is not set
# CONFIG_SCSI_QLA2322 is not set
# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_QLA24XX is not set
# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
#
# Fusion MPT device support
#
-# CONFIG_FUSION is not set
-# CONFIG_FUSION_SPI is not set
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=y
# CONFIG_FUSION_FC is not set
+CONFIG_FUSION_MAX_SGE=128
+# CONFIG_FUSION_CTL is not set
#
# IEEE 1394 (FireWire) support
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
CONFIG_E1000=y
+# CONFIG_E1000_NAPI is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_NET_FC is not set
# CONFIG_SHAPER is not set
CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
#
# ISDN subsystem
}
/*
+ * Compute how much memory is missing in a range.
+ * Unlike the other functions in this file, the arguments are in page numbers.
+ */
+unsigned long __init
+e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long ram = 0;
+ unsigned long start = start_pfn << PAGE_SHIFT;
+ unsigned long end = end_pfn << PAGE_SHIFT;
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long last, addr;
+
+ if (ei->type != E820_RAM ||
+ ei->addr+ei->size <= start ||
+ ei->addr >= end)
+ continue;
+
+ addr = round_up(ei->addr, PAGE_SIZE);
+ if (addr < start)
+ addr = start;
+
+ last = round_down(ei->addr + ei->size, PAGE_SIZE);
+ if (last >= end)
+ last = end;
+
+ if (last > addr)
+ ram += last - addr;
+ }
+ return ((end - start) - ram) >> PAGE_SHIFT;
+}
+
+/*
* Mark e820 reserved areas as busy for the resource manager.
*/
void __init e820_reserve_resources(void)
{
if (notscsync || !cpu_has_tsc)
return;
- sync_tsc(boot_cpu_id);
+ sync_tsc(0);
}
static __init int notscsync_setup(char *s)
*/
set_cpu_sibling_map(smp_processor_id());
+ /*
+ * Wait here for TSC sync so that nothing gets scheduled before the
+ * TSCs are synchronized.  We still process interrupts, which could
+ * see an inconsistent time in that window, unfortunately.
+ * Do this here because TSC sync has global unprotected state.
+ */
+ tsc_sync_wait();
+
/*
* We need to hold call_lock, so there is no inconsistency
* between the time smp_call_function() determines number of
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
unlock_ipi_call_lock();
- mb();
-
- /* Wait for TSC sync to not schedule things before.
- We still process interrupts, which could see an inconsistent
- time in that window unfortunately. */
- tsc_sync_wait();
-
cpu_idle();
}
{
if (tsk->pid == 1)
return 1;
- /* Warn for strace, but not for gdb */
- if (!test_ti_thread_flag(tsk->thread_info, TIF_SYSCALL_TRACE) &&
- (tsk->ptrace & PT_PTRACED))
+ if (tsk->ptrace & PT_PTRACED)
return 0;
return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
void __init paging_init(void)
{
{
- unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+ unsigned long zones_size[MAX_NR_ZONES];
+ unsigned long holes[MAX_NR_ZONES];
unsigned int max_dma;
+ memset(zones_size, 0, sizeof(zones_size));
+ memset(holes, 0, sizeof(holes));
+
max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- if (end_pfn < max_dma)
+ if (end_pfn < max_dma) {
zones_size[ZONE_DMA] = end_pfn;
- else {
+ holes[ZONE_DMA] = e820_hole_size(0, end_pfn);
+ } else {
zones_size[ZONE_DMA] = max_dma;
+ holes[ZONE_DMA] = e820_hole_size(0, max_dma);
zones_size[ZONE_NORMAL] = end_pfn - max_dma;
+ holes[ZONE_NORMAL] = e820_hole_size(max_dma, end_pfn);
}
- free_area_init(zones_size);
+ free_area_init_node(0, NODE_DATA(0), zones_size,
+ __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
return;
}
{
unsigned long start_pfn, end_pfn;
unsigned long zones[MAX_NR_ZONES];
+ unsigned long holes[MAX_NR_ZONES];
unsigned long dma_end_pfn;
memset(zones, 0, sizeof(unsigned long) * MAX_NR_ZONES);
+ memset(holes, 0, sizeof(unsigned long) * MAX_NR_ZONES);
start_pfn = node_start_pfn(nodeid);
end_pfn = node_end_pfn(nodeid);
dma_end_pfn = __pa(MAX_DMA_ADDRESS) >> PAGE_SHIFT;
if (start_pfn < dma_end_pfn) {
zones[ZONE_DMA] = dma_end_pfn - start_pfn;
+ holes[ZONE_DMA] = e820_hole_size(start_pfn, dma_end_pfn);
zones[ZONE_NORMAL] = end_pfn - dma_end_pfn;
+ holes[ZONE_NORMAL] = e820_hole_size(dma_end_pfn, end_pfn);
+
} else {
zones[ZONE_NORMAL] = end_pfn - start_pfn;
+ holes[ZONE_NORMAL] = e820_hole_size(start_pfn, end_pfn);
}
free_area_init_node(nodeid, NODE_DATA(nodeid), zones,
- start_pfn, NULL);
+ start_pfn, holes);
}
void __init numa_init_array(void)
* if there are no busses hanging off of the current
* ldt link then both the secondary and subordinate
* bus number fields are set to 0.
+ *
+ * RED-PEN
+ * This is slightly broken because it assumes
+ * HT node IDs == Linux node IDs, which is not always
+ * true. However, it is probably mostly true.
*/
if (!(SECONDARY_LDT_BUS_NUMBER(ldtbus) == 0
&& SUBORDINATE_LDT_BUS_NUMBER(ldtbus) == 0)) {
for (j = SECONDARY_LDT_BUS_NUMBER(ldtbus);
j <= SUBORDINATE_LDT_BUS_NUMBER(ldtbus);
- j++)
- pci_bus_to_node[j] = NODE_ID(nid);
+ j++) {
+ int node = NODE_ID(nid);
+ if (!node_online(node))
+ node = 0;
+ pci_bus_to_node[j] = node;
+ }
}
}
}
*/
#define IS_RESERVED_ADDR(base, len) \
(((len) > 0) && ((base) > 0) && ((base) + (len) < IO_SPACE_LIMIT) \
- && ((base) + (len) > 0x1000))
+ && ((base) + (len) > PCIBIOS_MIN_IO))
/*
* Clearing the flag (IORESOURCE_BUSY) allows drivers to use
extern char line_buf[80];
#endif /*ENABLE_DEBUGGER*/
-int acpi_specific_hotkey_enabled;
+int acpi_specific_hotkey_enabled = TRUE;
EXPORT_SYMBOL(acpi_specific_hotkey_enabled);
static unsigned int acpi_irq_irq;
int __init
acpi_hotkey_setup(char *str)
{
- acpi_specific_hotkey_enabled = TRUE;
+ acpi_specific_hotkey_enabled = FALSE;
return 1;
}
-__setup("acpi_specific_hotkey", acpi_hotkey_setup);
+__setup("acpi_generic_hotkey", acpi_hotkey_setup);
/*
* max_cstate is defined in the base kernel so modules can
static int acpi_shutdown(struct sys_device *x)
{
- return acpi_sleep_prepare(ACPI_STATE_S5);
+ if (system_state == SYSTEM_POWER_OFF) {
+ /* Prepare if we are going to power off the system */
+ return acpi_sleep_prepare(ACPI_STATE_S5);
+ }
+ return 0;
}
static struct sysdev_class acpi_sysclass = {
up(&dev->sem);
put_device(dev);
}
- return err;
+ if (err)
+ return err;
+ return count;
}
static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind);
pr_debug("device class '%s': release.\n", cd->class_id);
+ if (cd->devt_attr) {
+ kfree(cd->devt_attr);
+ cd->devt_attr = NULL;
+ }
+
if (cls->release)
cls->release(cd);
else {
if (class_dev->dev)
sysfs_remove_link(&class_dev->kobj, "device");
- if (class_dev->devt_attr) {
+ if (class_dev->devt_attr)
class_device_remove_file(class_dev, class_dev->devt_attr);
- kfree(class_dev->devt_attr);
- class_dev->devt_attr = NULL;
- }
class_device_remove_attrs(class_dev);
kobject_hotplug(&class_dev->kobj, KOBJ_REMOVE);
/*
* disable queueing at the driver/hardware level
*/
-static int cfq_max_depth = 1;
+static int cfq_max_depth = 2;
/*
* for the hash of cfqq inside the cfqd
return crq2;
if (crq2 == NULL)
return crq1;
- if (cfq_crq_requeued(crq1))
+
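+ /*
+ * Prefer a requeued request over a non-requeued one, then a sync
+ * request over an async one; otherwise fall back to the sector
+ * comparison below.
+ */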
+ if (cfq_crq_requeued(crq1) && !cfq_crq_requeued(crq2))
return crq1;
- if (cfq_crq_requeued(crq2))
+ else if (cfq_crq_requeued(crq2) && !cfq_crq_requeued(crq1))
+ return crq2;
+
+ if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2))
+ return crq1;
+ else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1))
return crq2;
s1 = crq1->request->sector;
cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_rq *crq)
{
- const int sync = cfq_crq_is_sync(crq);
+ struct cfq_io_context *cic;
cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
- if (sync) {
- struct cfq_io_context *cic = crq->io_context;
+ /*
+ * We never wait for an async request and we don't allow preemption
+ * of an async request, so just return early.
+ */
+ if (!cfq_crq_is_sync(crq))
+ return;
- cfq_update_io_thinktime(cfqd, cic);
- cfq_update_idle_window(cfqd, cfqq, cic);
+ cic = crq->io_context;
- cic->last_queue = jiffies;
- }
+ cfq_update_io_thinktime(cfqd, cic);
+ cfq_update_idle_window(cfqd, cfqq, cic);
+
+ cic->last_queue = jiffies;
if (cfqq == cfqd->active_queue) {
/*
config COMPUTONE
tristate "Computone IntelliPort Plus serial support"
- depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP
+ depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP && (BROKEN || !SPARC32)
---help---
This driver supports the entire family of Intelliport II/Plus
controllers with the exception of the MicroChannel controllers and
config DIGIEPCA
tristate "Digiboard Intelligent Async Support"
- depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP
+ depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP && (!64BIT || BROKEN)
---help---
This is a driver for Digi International's Xx, Xeve, and Xem series
of cards which provide multiple serial ports. You would need
config SYNCLINKMP
tristate "SyncLink Multiport support"
- depends on SERIAL_NONSTANDARD
+ depends on SERIAL_NONSTANDARD && (BROKEN || !SPARC32)
help
Enable support for the SyncLink Multiport (2 or 4 ports)
serial adapter, running asynchronous and HDLC communications up
config GEN_RTC
tristate "Generic /dev/rtc emulation"
- depends on RTC!=y && !IA64 && !ARM && !PPC64
+ depends on RTC!=y && !IA64 && !ARM && !PPC64 && !M32R && !SPARC32
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
- unsigned long long val;
+ unsigned long pfn;
+
+ /* Turn a kernel-virtual address into a physical page frame */
+ pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
+
/*
* RED-PEN: on some architectures there is more mapped memory
* than available in mem_map which pfn_valid checks
*
* RED-PEN: vmalloc is not supported right now.
*/
- if (!pfn_valid(vma->vm_pgoff))
+ if (!pfn_valid(pfn))
return -EIO;
- val = (u64)vma->vm_pgoff << PAGE_SHIFT;
- vma->vm_pgoff = __pa(val) >> PAGE_SHIFT;
+
+ vma->vm_pgoff = pfn;
return mmap_mem(file, vma);
}
#include <linux/sched.h>
#include <linux/byteorder/generic.h>
#include <asm/sn/sn_sal.h>
+#include <asm/unaligned.h>
#include "snsc.h"
static struct subch_data_s *event_sd;
scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
{
char *desc_end;
+ __be32 from_buf;
/* record event source address */
- *src = be32_to_cpup((__be32 *)event);
+ from_buf = get_unaligned((__be32 *)event);
+ *src = be32_to_cpup(&from_buf);
event += 4; /* move on to event code */
/* record the system controller's event code */
- *code = be32_to_cpup((__be32 *)event);
+ from_buf = get_unaligned((__be32 *)event);
+ *code = be32_to_cpup(&from_buf);
event += 4; /* move on to event arguments */
/* how many arguments are in the packet? */
/* not an integer argument, so give up */
return -1;
}
- *esp_code = be32_to_cpup((__be32 *)event);
+ from_buf = get_unaligned((__be32 *)event);
+ *esp_code = be32_to_cpup(&from_buf);
event += 4;
/* parse out the event description */
int ret = 0;
acquire_console_sem();
- if (tty->count == 1) {
+ if (tty->driver_data == NULL) {
ret = vc_allocate(currcons);
if (ret == 0) {
struct vc_data *vc = vc_cons[currcons].d;
int adm1026_detach_client(struct i2c_client *client)
{
i2c_detach_client(client);
- kfree(client);
+ kfree(i2c_get_clientdata(client));
return 0;
}
/* Error out and cleanup code */
exitfree:
- kfree(new_client);
+ kfree(data);
exit:
return err;
}
return 0;
exit_free:
- kfree(new_client);
+ kfree(data);
exit:
return err;
}
if ((ret = i2c_detach_client(client)) != 0) {
return ret;
}
- kfree(client);
+ kfree(i2c_get_clientdata(client));
return 0;
}
return 0;
exit_free:
- kfree(new_client);
+ kfree(data);
exit:
return err;
}
"experience to the module author.\n");
/* Supported value: 2 (clears the status) */
- fscpos_write_value(client, FSCPOS_REG_TEMP_STATE[nr], 2);
+ fscpos_write_value(client, FSCPOS_REG_TEMP_STATE[nr - 1], 2);
return count;
}
return 0;
error_free:
- kfree(new_client);
+ kfree(data);
error_release:
release_region(addr, SMSC_EXTENT);
return err;
return 0;
error_free:
- kfree(new_client);
+ kfree(data);
error_release:
release_region(address, SMSC_EXTENT);
return err;
config BLK_DEV_IDEDMA_PMAC
bool "PowerMac IDE DMA support"
depends on BLK_DEV_IDE_PMAC
+ select BLK_DEV_IDEDMA_PCI
help
This option allows the driver for the built-in IDE controller on
Power Macintoshes and PowerBooks to use DMA (direct memory access)
unsigned long flags;
} idefloppy_floppy_t;
-#define IDEFLOPPY_TICKS_DELAY 3 /* default delay for ZIP 100 */
+#define IDEFLOPPY_TICKS_DELAY HZ/20 /* default delay for ZIP 100 (50ms) */
/*
* Floppy flag bits values.
.channels = 2,
.autodma = NOAUTODMA,
.bootable = ON_BOARD,
+ },{ /* 14 */
+ .name = "Revolution",
+ .init_hwif = init_hwif_generic,
+ .channels = 2,
+ .autodma = AUTODMA,
+ .bootable = OFF_BOARD,
}
};
{ PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11},
{ PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12},
{ PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13},
+ { PCI_VENDOR_ID_NETCELL,PCI_DEVICE_ID_REVOLUTION, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14},
/* Must come last. If you add entries adjust this table appropriately and the init_one code */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 0},
{ 0, },
*
* CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
*
+ * HT1000: AKA BCM5785 - HyperTransport southbridge for Opteron systems.
+ * The IDE controller is the same as the CSB6; single channel ATA100 only.
+ *
* Documentation:
* Available under NDA only. Errata info very hard to get.
*
if (!svwks_revision)
pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision);
+ if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
+ return 2;
if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
u32 reg = 0;
if (isa_dev)
case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
+ case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
return 1;
default:
break;
btr |= (svwks_revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
pci_write_config_byte(dev, 0x5A, btr);
}
+ /* Setup HT1000 SouthBridge Controller - Single Channel Only */
+ else if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) {
+ pci_read_config_byte(dev, 0x5A, &btr);
+ btr &= ~0x40;
+ btr |= 0x3;
+ pci_write_config_byte(dev, 0x5A, btr);
+ }
return (dev->irq) ? dev->irq : 0;
}
.channels = 1, /* 2 */
.autodma = AUTODMA,
.bootable = ON_BOARD,
+ },{ /* 4 */
+ .name = "SvrWks HT1000",
+ .init_setup = init_setup_svwks,
+ .init_chipset = init_chipset_svwks,
+ .init_hwif = init_hwif_svwks,
+ .init_dma = init_dma_svwks,
+ .channels = 1, /* 2 */
+ .autodma = AUTODMA,
+ .bootable = ON_BOARD,
}
};
{ PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
{ PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
{ PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
+ { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, svwks_pci_tbl);
};
static struct pci_device_id pmac_ide_pci_match[] = {
- { PCI_VENDOR_ID_APPLE, PCI_DEVIEC_ID_APPLE_UNI_N_ATA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_ATA,
case PCI_DEVICE_ID_AMD_VIPER_7409:
case PCI_DEVICE_ID_CMD_643:
case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
+ case PCI_DEVICE_ID_REVOLUTION:
simplex_stat = hwif->INB(dma_base + 2);
hwif->OUTB((simplex_stat&0x60),(dma_base + 2));
simplex_stat = hwif->INB(dma_base + 2);
int num_ports, i;
spin_lock_init(&ohci->phy_reg_lock);
- spin_lock_init(&ohci->event_lock);
/* Put some defaults to these undefined bus options */
buf = reg_read(ohci, OHCI1394_BusOptions);
/* We hopefully don't have to pre-allocate IT DMA like we did
* for IR DMA above. Allocate it on-demand and mark inactive. */
ohci->it_legacy_context.ohci = NULL;
+ spin_lock_init(&ohci->event_lock);
+ /*
+ * Interrupts are disabled at this point, but because the IRQ line is
+ * shared (SA_SHIRQ) our handler may still be invoked.  It will see no
+ * event, of course, but enough must be initialized by now for that
+ * "no event" path to work.
+ */
if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
OHCI1394_DRIVER_NAME, ohci))
FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
menu "InfiniBand support"
config INFINIBAND
+ depends on PCI || BROKEN
tristate "InfiniBand support"
---help---
Core support for InfiniBand (IB). Make sure to also select
list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
struct ib_mr *mr = idr_find(&ib_uverbs_mr_idr, uobj->id);
+ struct ib_device *mrdev = mr->device;
struct ib_umem_object *memobj;
idr_remove(&ib_uverbs_mr_idr, uobj->id);
ib_dereg_mr(mr);
memobj = container_of(uobj, struct ib_umem_object, uobject);
- ib_umem_release_on_close(mr->device, &memobj->umem);
+ ib_umem_release_on_close(mrdev, &memobj->umem);
list_del(&uobj->list);
kfree(memobj);
err = register_filesystem(&capifs_fs_type);
if (!err) {
capifs_mnt = kern_mount(&capifs_fs_type);
- if (IS_ERR(capifs_mnt))
+ if (IS_ERR(capifs_mnt)) {
err = PTR_ERR(capifs_mnt);
+ unregister_filesystem(&capifs_fs_type);
+ }
}
if (!err)
printk(KERN_NOTICE "capifs: Rev %s\n", rev);
config HISAX_FRITZPCI
bool "AVM PnP/PCI (Fritz!PnP/PCI)"
+ depends on BROKEN || !PPC64
help
This enables HiSax support for the AVM "Fritz!PnP" and "Fritz!PCI".
See <file:Documentation/isdn/README.HiSax> on how to configure it.
# on non-powerbook machines (but only on PMU based ones AFAIK)
config PMAC_BACKLIGHT
bool "Backlight control for LCD screens"
- depends on ADB_PMU
+ depends on ADB_PMU && (BROKEN || !PPC64)
help
Say Y here to build in code to manage the LCD backlight on a
Macintosh PowerBook. With this code, the backlight will be turned
{
up(&mddev->reconfig_sem);
- if (mddev->thread)
- md_wakeup_thread(mddev->thread);
+ md_wakeup_thread(mddev->thread);
}
mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
mddev->raid_disks = sb->raid_disks;
mddev->size = sb->size;
mddev->events = md_event(sb);
+ mddev->bitmap_offset = 0;
if (sb->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->size = le64_to_cpu(sb->size)/2;
mddev->events = le64_to_cpu(sb->events);
+ mddev->bitmap_offset = 0;
mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
mddev->pers = pers[pnum];
spin_unlock(&pers_lock);
+ mddev->recovery = 0;
mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
/* before we start the array running, initialise the bitmap */
mddev->in_sync = 1;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
if (mddev->sb_dirty)
md_update_sb(mddev);
fput(mddev->bitmap_file);
mddev->bitmap_file = NULL;
}
+ mddev->bitmap_offset = 0;
/*
* Free resources if final stop
export_rdev(rdev);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (mddev->thread)
- md_wakeup_thread(mddev->thread);
+ md_wakeup_thread(mddev->thread);
return err;
}
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
+MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
int dibusb2_0_streaming_ctrl(struct dvb_usb_device *d, int onoff)
{
- u8 b[2];
- b[0] = DIBUSB_REQ_SET_IOCTL;
- b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM;
+ u8 b[3] = { 0 };
+ int ret;
+
+ if ((ret = dibusb_streaming_ctrl(d,onoff)) < 0)
+ return ret;
- dvb_usb_generic_write(d,b,3);
+ if (onoff) {
+ b[0] = DIBUSB_REQ_SET_STREAMING_MODE;
+ b[1] = 0x00;
+ if ((ret = dvb_usb_generic_write(d,b,2)) < 0)
+ return ret;
+ }
- return dibusb_streaming_ctrl(d,onoff);
+ b[0] = DIBUSB_REQ_SET_IOCTL;
+ b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM;
+ return dvb_usb_generic_write(d,b,3);
}
EXPORT_SYMBOL(dibusb2_0_streaming_ctrl);
*/
if (newfeedcount == 0) {
deb_ts("stop feeding\n");
+ dvb_usb_urb_kill(d);
if (d->props.streaming_ctrl != NULL)
if ((ret = d->props.streaming_ctrl(d,0)))
err("error while stopping stream.");
- dvb_usb_urb_kill(d);
}
d->feedcount = newfeedcount;
* for reception.
*/
if (d->feedcount == onoff && d->feedcount > 0) {
+ deb_ts("submitting all URBs\n");
+ dvb_usb_urb_submit(d);
deb_ts("controlling pid parser\n");
if (d->props.caps & DVB_USB_HAS_PID_FILTER &&
return -ENODEV;
}
- dvb_usb_urb_submit(d);
}
return 0;
}
config VIDEO_M32R_AR_M64278
tristate "Use Colour AR module M64278(VGA)"
- depends on VIDEO_M32R_AR
+ depends on VIDEO_M32R_AR && PLAT_M32700UT
---help---
Say Y here to use the Renesas M64278E-800 camera module,
which supports VGA(640x480 pixels) size of images.
{
struct net_device *dev;
struct cp_private *cp;
+ unsigned long flags;
dev = pci_get_drvdata (pdev);
cp = netdev_priv(dev);
cp_init_hw (cp);
netif_start_queue (dev);
+
+ spin_lock_irqsave (&cp->lock, flags);
+
+ mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
+
+ spin_unlock_irqrestore (&cp->lock, flags);
return 0;
}
be called ibmveth.
config IBM_EMAC
- tristate "IBM PPC4xx EMAC driver support"
+ bool "IBM PPC4xx EMAC driver support"
depends on 4xx
select CRC32
---help---
config IBM_EMAC_ERRMSG
bool "Verbose error messages"
- depends on IBM_EMAC
+ depends on IBM_EMAC && BROKEN
config IBM_EMAC_RXB
int "Number of receive buffers"
* net_device_stats
* * introduced tx_timeout function
* * reworked locking
+ *
+ * 01-Jul-2005 Ben Dooks <ben@simtec.co.uk>
+ * * fixed spinlock call without pointer
+ * * ensure spinlock is initialised
*/
#include <linux/module.h>
static int dm9000_open(struct net_device *);
static int dm9000_start_xmit(struct sk_buff *, struct net_device *);
static int dm9000_stop(struct net_device *);
-static int dm9000_do_ioctl(struct net_device *, struct ifreq *, int);
static void dm9000_timer(unsigned long);
/* Save previous register address */
reg_save = readb(db->io_addr);
- spin_lock_irqsave(db->lock,flags);
+ spin_lock_irqsave(&db->lock,flags);
netif_stop_queue(dev);
dm9000_reset(db);
/* Restore previous register address */
writeb(reg_save, db->io_addr);
- spin_unlock_irqrestore(db->lock,flags);
+ spin_unlock_irqrestore(&db->lock,flags);
}
int i;
u32 id_val;
- printk(KERN_INFO "%s Ethernet Driver\n", CARDNAME);
-
/* Init network device */
ndev = alloc_etherdev(sizeof (struct board_info));
if (!ndev) {
db = (struct board_info *) ndev->priv;
memset(db, 0, sizeof (*db));
+ spin_lock_init(&db->lock);
+
if (pdev->num_resources < 2) {
ret = -ENODEV;
goto out;
ndev->stop = &dm9000_stop;
ndev->get_stats = &dm9000_get_stats;
ndev->set_multicast_list = &dm9000_hash_table;
- ndev->do_ioctl = &dm9000_do_ioctl;
#ifdef DM9000_PROGRAM_EEPROM
program_eeprom(db);
/* set and active a timer process */
init_timer(&db->timer);
- db->timer.expires = DM9000_TIMER_WUT * 2;
+ db->timer.expires = DM9000_TIMER_WUT;
db->timer.data = (unsigned long) dev;
db->timer.function = &dm9000_timer;
add_timer(&db->timer);
return &db->stats;
}
-/*
- * Process the upper socket ioctl command
- */
-static int
-dm9000_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- PRINTK1("entering %s\n",__FUNCTION__);
- return 0;
-}
/*
* A periodic timer routine
{
struct net_device *dev = (struct net_device *) data;
board_info_t *db = (board_info_t *) dev->priv;
- u8 reg_save;
- unsigned long flags;
PRINTK3("dm9000_timer()\n");
- spin_lock_irqsave(db->lock,flags);
- /* Save previous register address */
- reg_save = readb(db->io_addr);
-
mii_check_media(&db->mii, netif_msg_link(db), 0);
- /* Restore previous register address */
- writeb(reg_save, db->io_addr);
- spin_unlock_irqrestore(db->lock,flags);
-
/* Set timer again */
db->timer.expires = DM9000_TIMER_WUT;
add_timer(&db->timer);
{
board_info_t *db = (board_info_t *) dev->priv;
unsigned long flags;
+ unsigned int reg_save;
int ret;
spin_lock_irqsave(&db->lock,flags);
+
+ /* Save previous register address */
+ reg_save = readb(db->io_addr);
+
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
/* The read data keeps on REG_0D & REG_0E */
ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
+ /* restore the previous address */
+ writeb(reg_save, db->io_addr);
+
spin_unlock_irqrestore(&db->lock,flags);
return ret;
{
board_info_t *db = (board_info_t *) dev->priv;
unsigned long flags;
+ unsigned long reg_save;
spin_lock_irqsave(&db->lock,flags);
+ /* Save previous register address */
+ reg_save = readb(db->io_addr);
+
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
udelay(500); /* Wait write complete */
iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
+ /* restore the previous address */
+ writeb(reg_save, db->io_addr);
+
spin_unlock_irqrestore(&db->lock,flags);
}
static int __init
dm9000_init(void)
{
+ printk(KERN_INFO "%s Ethernet Driver\n", CARDNAME);
+
return driver_register(&dm9000_driver); /* search board and register */
}
struct e1000_adapter *adapter = netdev_priv(netdev);
disable_irq(adapter->pdev->irq);
e1000_intr(adapter->pdev->irq, netdev, NULL);
+ e1000_clean_tx_irq(adapter);
enable_irq(adapter->pdev->irq);
}
#endif
{
struct sockaddr_ax25 *sa = addr;
- if (sa->sax25_family != AF_AX25)
- return -EINVAL;
-
- if (!sa->sax25_ndigis)
- return -EINVAL;
-
spin_lock_irq(&dev->xmit_lock);
memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
spin_unlock_irq(&dev->xmit_lock);
netif_start_queue(dev);
init_timer(&sp->tx_t);
+ sp->tx_t.function = sp_xmit_on_air;
+ sp->tx_t.data = (unsigned long) sp;
+
init_timer(&sp->resync_t);
spin_unlock_bh(&sp->lock);
TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
TAH_MR_DIG);
- iounmap(&tahp);
+ iounmap(tahp);
return 0;
}
};
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int emac_netpoll(struct net_device *ndev)
+static void emac_netpoll(struct net_device *ndev)
{
emac_rxeob_dev((void *)ndev, 0);
emac_txeob_dev((void *)ndev, 0);
- return 0;
}
#endif
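The two netpoll hunks above (e1000 and emac) touch the same hook: the dev->poll_controller callback used under CONFIG_NET_POLL_CONTROLLER returns void, and a driver typically either masks its interrupt and reuses the normal ISR path (as e1000 does) or calls its RX/TX completion handlers directly (as emac does). A minimal hypothetical sketch of the former style; the example_* adapter, field and ISR names are invented, and the three-argument ISR signature is the era-appropriate assumption seen in the e1000 hunk:

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Illustrative sketch only -- hypothetical driver. */
#include <linux/netdevice.h>
#include <linux/interrupt.h>

static void example_netpoll(struct net_device *dev)
{
	struct example_adapter *adapter = netdev_priv(dev);	/* hypothetical */

	disable_irq(adapter->irq);
	example_intr(adapter->irq, dev, NULL);	/* reuse the normal ISR */
	enable_irq(adapter->irq);
}
#endif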
ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG);
while (ioc3_r_micr() & MICR_BUSY);
- return ioc3_r_micr() & MIDR_DATA_MASK;
+ return ioc3_r_midr_r() & MIDR_DATA_MASK;
}
static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
dev->features = NETIF_F_IP_CSUM;
#endif
- ioc3_setup_duplex(ip);
sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);
goto out_stop;
mii_check_media(&ip->mii, 1, 1);
+ ioc3_setup_duplex(ip);
vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
model = (sw_physid2 >> 4) & 0x3f;
struct ethtool_drvinfo *info)
{
struct ioc3_private *ip = netdev_priv(dev);
-
+
strcpy (info->driver, IOC3_NAME);
strcpy (info->version, IOC3_VERSION);
strcpy (info->bus_info, pci_name(ip->pdev));
spin_lock_irq(&ip->ioc3_lock);
rc = mii_ethtool_sset(&ip->mii, cmd);
spin_unlock_irq(&ip->ioc3_lock);
-
+
return rc;
}
.ethtool_ops = &loopback_ethtool_ops,
};
-/* Setup and register the of the LOOPBACK device. */
+/* Setup and register the loopback device. */
int __init loopback_init(void)
{
struct net_device_stats *stats;
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.35"
-#define DRV_MODULE_RELDATE "August 6, 2005"
+#define DRV_MODULE_VERSION "3.37"
+#define DRV_MODULE_RELDATE "August 25, 2005"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
err = -EIO;
- tg3_abort_hw(tp, 1);
-
tg3_reset_hw(tp);
mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
tp->phy_id = hw_phy_id;
if (hw_phy_id_masked == PHY_ID_BCM8002)
tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+ else
+ tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
} else {
if (tp->phy_id != PHY_ID_INVALID) {
/* Do nothing, phy ID already set up in
config TMS380TR
tristate "Generic TMS380 Token Ring ISA/PCI adapter support"
- depends on TR && (PCI || ISA)
+ depends on TR && (PCI || ISA && ISA_DMA_API)
select FW_LOADER
---help---
This driver provides generic support for token ring adapters
config AIRO_CS
tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
- depends on NET_RADIO && PCMCIA
+ depends on NET_RADIO && PCMCIA && (BROKEN || !M32R)
---help---
This is the standard Linux driver to support Cisco/Aironet PCMCIA
802.11 wireless cards. This driver is the same as the Aironet
config PARPORT_PC
tristate "PC-style hardware"
- depends on PARPORT && (!SPARC64 || PCI) && !SPARC32
+ depends on PARPORT && (!SPARC64 || PCI) && !SPARC32 && !M32R
---help---
You should say Y here if you have a PC-style parallel port. All
IBM PC compatible computers and some Alphas have PC-style
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
#ifndef _PCIEHP_H
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>,<dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <dely.l.sy@intel.com>
+ * Send feedback to <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>,<dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
*
*/
#ifndef _SHPCHP_H
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>,<dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <dely.l.sy@intel.com>
+ * Send feedback to <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>,<dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
}
}
-static void disable_msi_mode(struct pci_dev *dev, int pos, int type)
+void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
u16 control;
if (!pci_msi_enable || !dev)
return status;
+ if (dev->no_msi)
+ return status;
+
temp = dev->irq;
if ((status = msi_init()) < 0)
#define pci_msi_quirk 0
#endif
+#ifdef CONFIG_PCI_MSI
+void disable_msi_mode(struct pci_dev *dev, int pos, int type);
+#else
+static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { }
+#endif
+
extern int pcie_mch_quirk;
extern struct device_attribute pci_dev_attrs[];
extern struct class_device_attribute class_device_attr_cpuaffinity;
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch );
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch );
+
+/*
+ * It's possible for the MSI to get corrupted if shpc and acpi
+ * are used together on certain PXH-based systems.
+ */
+static void __devinit quirk_pcie_pxh(struct pci_dev *dev)
+{
+ disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
+ PCI_CAP_ID_MSI);
+ dev->no_msi = 1;
+
+ printk(KERN_WARNING "PCI: PXH quirk detected, "
+ "disabling MSI for SHPC device\n");
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
+
+
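For readers unfamiliar with the mechanism used above: a fixup registered with DECLARE_PCI_FIXUP_EARLY() runs for matching vendor/device IDs while the device is being enumerated, well before any driver binds, which is why the PXH quirk can tear down the MSI capability and set dev->no_msi in time for the dev->no_msi check added in the msi.c hunk above to take effect. A minimal, purely hypothetical quirk of the same shape (the IDs and the function name are invented):

/* Illustrative sketch only -- not part of this patch. */
#include <linux/pci.h>

#define EXAMPLE_VENDOR_ID	0x1234		/* hypothetical */
#define EXAMPLE_DEVICE_ID	0x5678		/* hypothetical */

static void __devinit quirk_example_no_msi(struct pci_dev *dev)
{
	dev->no_msi = 1;	/* checked by the MSI core (see msi.c hunk above) */
	printk(KERN_INFO "PCI: disabling MSI on example device %s\n",
	       pci_name(dev));
}
DECLARE_PCI_FIXUP_EARLY(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID,
			quirk_example_no_msi);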
static void __devinit quirk_netmos(struct pci_dev *dev)
{
unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
* FIXME: IO should be max 256 bytes. However, since we may
* have a P2P bridge below a cardbus bridge, we need 4K.
*/
-#define CARDBUS_IO_SIZE (4096)
+#define CARDBUS_IO_SIZE (256)
#define CARDBUS_MEM_SIZE (32*1024*1024)
static void __devinit
if (resno < 6) {
reg = PCI_BASE_ADDRESS_0 + 4 * resno;
} else if (resno == PCI_ROM_RESOURCE) {
- new |= res->flags & IORESOURCE_ROM_ENABLE;
+ if (!(res->flags & IORESOURCE_ROM_ENABLE))
+ return;
+ new |= PCI_ROM_ADDRESS_ENABLE;
reg = dev->rom_base_reg;
} else {
/* Hmm, non-standard resource. */
#ifdef CONFIG_PCMCIA_PROBE
+#include <asm/irq.h>
/* mask of IRQs already reserved by other cards, we should avoid using them */
static u8 pcmcia_used_irq[NR_IRQS];
#endif
if (drv->link.driver.probe) {
if (drv->link.driver.probe(&dev->dev)) {
dev->dev.driver = NULL;
+ dev->card_link = NULL;
+ up_write(&dev->dev.bus->subsys.rwsem);
return NULL;
}
}
/***************** SCRUBBER HELPER ROUTINES **********************/
-static inline volatile __u64
+static inline __u64
qdio_get_micros(void)
{
return (get_clock() >> 10); /* time>>12 is microseconds */
}
/* locked by the locks in qdio_activate and qdio_cleanup */
-static __u32 * volatile
+static __u32 volatile *
qdio_get_indicator(void)
{
int i;
#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards
#define z90crypt_VARIANT 2 // 2 = added PCIXCC MCL3 and CEX2C support
-/**
- * If we are not using the sparse checker, __user has no use.
- */
-#ifdef __CHECKER__
-# define __user __attribute__((noderef, address_space(1)))
-#else
-# define __user
-#endif
-
/**
* struct ica_rsa_modexpo
*
zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status,
u32 d_id)
{
- struct zfcp_port *port, *tmp_port;
+ struct zfcp_port *port;
int check_wwpn;
- scsi_id_t scsi_id;
- int found;
check_wwpn = !(status & ZFCP_STATUS_PORT_NO_WWPN);
-
/*
* check that there is no port with this WWPN already in list
*/
} else {
snprintf(port->sysfs_device.bus_id,
BUS_ID_SIZE, "0x%016llx", wwpn);
- port->sysfs_device.parent = &adapter->ccw_device->dev;
+ port->sysfs_device.parent = &adapter->ccw_device->dev;
}
port->sysfs_device.release = zfcp_sysfs_port_release;
dev_set_drvdata(&port->sysfs_device, port);
zfcp_port_get(port);
- scsi_id = 1;
- found = 0;
write_lock_irq(&zfcp_data.config_lock);
- list_for_each_entry(tmp_port, &adapter->port_list_head, list) {
- if (atomic_test_mask(ZFCP_STATUS_PORT_NO_SCSI_ID,
- &tmp_port->status))
- continue;
- if (tmp_port->scsi_id != scsi_id) {
- found = 1;
- break;
- }
- scsi_id++;
- }
- port->scsi_id = scsi_id;
- if (found)
- list_add_tail(&port->list, &tmp_port->list);
- else
- list_add_tail(&port->list, &adapter->port_list_head);
+ list_add_tail(&port->list, &adapter->port_list_head);
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
if (d_id == ZFCP_DID_DIRECTORY_SERVICE)
list_del(&port->list);
port->adapter->ports--;
write_unlock_irq(&zfcp_data.config_lock);
+ if (port->rport)
+ fc_remote_port_delete(port->rport);
+ port->rport = NULL;
zfcp_adapter_put(port->adapter);
zfcp_sysfs_port_remove_files(&port->sysfs_device,
atomic_read(&port->status));
zfcp_ccw_set_offline(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
+ struct zfcp_port *port;
+ struct fc_rport *rport;
down(&zfcp_data.config_sema);
adapter = dev_get_drvdata(&ccw_device->dev);
+ /* might be racy, but we cannot take config_lock due to the fact that
+ fc_remote_port_delete might sleep */
+ list_for_each_entry(port, &adapter->port_list_head, list)
+ if (port->rport) {
+ rport = port->rport;
+ port->rport = NULL;
+ fc_remote_port_delete(rport);
+ }
zfcp_erp_adapter_shutdown(adapter, 0);
zfcp_erp_wait(adapter);
zfcp_adapter_scsi_unregister(adapter);
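The comment in the hunk above spells out a recurring kernel constraint: fc_remote_port_delete() may sleep, so it must not be called under a spinlock; the pointer is therefore unpublished first and the sleeping teardown runs afterwards. A generic, hypothetical sketch of that idiom (the example_* structures and helpers are invented, not part of this patch):

/* Illustrative sketch only -- not part of this patch. */
#include <linux/spinlock.h>

struct example_rport;				/* opaque, managed elsewhere */
extern void example_rport_delete(struct example_rport *rport);	/* may sleep */

struct example_port {
	spinlock_t lock;
	struct example_rport *rport;		/* visible to other contexts */
};

static void example_port_offline(struct example_port *port)
{
	struct example_rport *rport;

	spin_lock(&port->lock);
	rport = port->rport;			/* snapshot ...            */
	port->rport = NULL;			/* ... and unpublish        */
	spin_unlock(&port->lock);

	if (rport)
		example_rport_delete(rport);	/* sleeping call, unlocked */
}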
*/
struct zfcp_port {
struct device sysfs_device; /* sysfs device */
+ struct fc_rport *rport; /* rport of fc transport class */
struct list_head list; /* list of remote ports */
atomic_t refcount; /* reference count */
wait_queue_head_t remove_wq; /* can be used to wait for
list */
u32 units; /* # of logical units in list */
atomic_t status; /* status of this remote port */
- scsi_id_t scsi_id; /* own SCSI ID */
wwn_t wwnn; /* WWNN if known */
wwn_t wwpn; /* WWPN */
fc_id_t d_id; /* D_ID */
if ((result == ZFCP_ERP_SUCCEEDED)
&& (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY,
&unit->status))
- && (!unit->device))
- scsi_add_device(unit->port->adapter->scsi_host, 0,
- unit->port->scsi_id, unit->scsi_lun);
+ && !unit->device
+ && port->rport)
+ scsi_add_device(port->adapter->scsi_host, 0,
+ port->rport->scsi_target_id,
+ unit->scsi_lun);
zfcp_unit_put(unit);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
case ZFCP_ERP_ACTION_REOPEN_PORT:
+ if ((result == ZFCP_ERP_SUCCEEDED)
+ && !atomic_test_mask(ZFCP_STATUS_PORT_NO_WWPN,
+ &port->status)
+ && !port->rport) {
+ struct fc_rport_identifiers ids;
+ ids.node_name = port->wwnn;
+ ids.port_name = port->wwpn;
+ ids.port_id = port->d_id;
+ ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+ port->rport =
+ fc_remote_port_add(adapter->scsi_host, 0, &ids);
+ if (!port->rport)
+ ZFCP_LOG_NORMAL("failed registration of rport"
+ "(adapter %s, wwpn=0x%016Lx)\n",
+ zfcp_get_busid_by_port(port),
+ port->wwpn);
+ }
zfcp_port_put(port);
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
struct scsi_cmnd *, struct timer_list *);
extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *,
struct timer_list *);
+extern void zfcp_set_fc_host_attrs(struct zfcp_adapter *);
+extern void zfcp_set_fc_rport_attrs(struct zfcp_port *);
extern struct scsi_transport_template *zfcp_transport_template;
extern struct fc_function_template zfcp_transport_functions;
zfcp_erp_adapter_shutdown(adapter, 0);
return -EIO;
}
+ zfcp_set_fc_host_attrs(adapter);
return 0;
}
struct zfcp_unit *unit, *retval = NULL;
list_for_each_entry(port, &adapter->port_list_head, list) {
- if (id != port->scsi_id)
+ if (!port->rport || (id != port->rport->scsi_target_id))
continue;
list_for_each_entry(unit, &port->unit_list_head, list) {
if (lun == unit->scsi_lun) {
struct zfcp_port *port;
list_for_each_entry(port, &adapter->port_list_head, list) {
- if (id == port->scsi_id)
+ if (port->rport && (id == port->rport->scsi_target_id))
return port;
}
return (struct zfcp_port *) NULL;
{
int retval;
struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata;
- struct Scsi_Host *scsi_host = scpnt->device->host;
if (!unit) {
ZFCP_LOG_NORMAL("bug: Tried reset for nonexistent unit\n");
{
int retval = 0;
struct zfcp_unit *unit;
- struct Scsi_Host *scsi_host = scpnt->device->host;
unit = (struct zfcp_unit *) scpnt->device->hostdata;
ZFCP_LOG_NORMAL("bus reset because of problems with "
{
int retval = 0;
struct zfcp_unit *unit;
- struct Scsi_Host *scsi_host = scpnt->device->host;
unit = (struct zfcp_unit *) scpnt->device->hostdata;
ZFCP_LOG_NORMAL("host reset because of problems with "
shost = adapter->scsi_host;
if (!shost)
return;
+ fc_remove_host(shost);
scsi_remove_host(shost);
scsi_host_put(shost);
adapter->scsi_host = NULL;
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}
+void
+zfcp_set_fc_host_attrs(struct zfcp_adapter *adapter)
+{
+ struct Scsi_Host *shost = adapter->scsi_host;
+
+ fc_host_node_name(shost) = adapter->wwnn;
+ fc_host_port_name(shost) = adapter->wwpn;
+ strncpy(fc_host_serial_number(shost), adapter->serial_number,
+ min(FC_SERIAL_NUMBER_SIZE, 32));
+ fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+}
+
struct fc_function_template zfcp_transport_functions = {
.get_starget_port_id = zfcp_get_port_id,
.get_starget_port_name = zfcp_get_port_name,
.show_starget_port_id = 1,
.show_starget_port_name = 1,
.show_starget_node_name = 1,
+ .show_rport_supported_classes = 1,
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_serial_number = 1,
};
/**
ZFCP_DEFINE_PORT_ATTR(status, "0x%08x\n", atomic_read(&port->status));
ZFCP_DEFINE_PORT_ATTR(wwnn, "0x%016llx\n", port->wwnn);
ZFCP_DEFINE_PORT_ATTR(d_id, "0x%06x\n", port->d_id);
-ZFCP_DEFINE_PORT_ATTR(scsi_id, "0x%x\n", port->scsi_id);
ZFCP_DEFINE_PORT_ATTR(in_recovery, "%d\n", atomic_test_mask
(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status));
ZFCP_DEFINE_PORT_ATTR(access_denied, "%d\n", atomic_test_mask
static struct attribute *zfcp_port_no_ns_attrs[] = {
&dev_attr_unit_add.attr,
&dev_attr_unit_remove.attr,
- &dev_attr_scsi_id.attr,
NULL
};
config MAC_SCSI
bool "Macintosh NCR5380 SCSI"
- depends on MAC && SCSI
+ depends on MAC && SCSI=y
help
This is the NCR 5380 SCSI controller included on most of the 68030
based Macintoshes. If you have one of these say Y and read the
config MVME147_SCSI
bool "WD33C93 SCSI driver for MVME147"
- depends on MVME147 && SCSI
+ depends on MVME147 && SCSI=y
help
Support for the on-board SCSI controller on the Motorola MVME147
single-board computer.
config SUN3X_ESP
bool "Sun3x ESP SCSI"
- depends on SUN3X && SCSI
+ depends on SUN3X && SCSI=y
help
The ESP was an on-board SCSI controller used on Sun 3/80
machines. Say Y here to compile in support for it.
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
module_init(ahci_init);
module_exit(ahci_exit);
#
config SCSI_ACORNSCSI_3
tristate "Acorn SCSI card (aka30) support"
- depends on ARCH_ACORN && SCSI
+ depends on ARCH_ACORN && SCSI && BROKEN
help
This enables support for the Acorn SCSI card (aka30). If you have an
Acorn system with one of these, say Y. If unsure, say N.
#include <linux/libata.h>
#define DRV_NAME "ata_piix"
-#define DRV_VERSION "1.03"
+#define DRV_VERSION "1.04"
enum {
PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
* cross a page boundary.
*/
#define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
-#define VIRTX_LEN (sizeof(void *) * DC395x_MAX_SG_LISTENTRY)
+
struct SGentry {
u32 address; /* bus! address */
u8 sg_count; /* No of HW sg entries for this request */
u8 sg_index; /* Index of HW sg entry for this request */
u32 total_xfer_length; /* Total number of bytes remaining to be transferred */
- void **virt_map;
unsigned char *virt_addr; /* Virtual address of current transfer position */
/*
reqlen, cmd->request_buffer, cmd->use_sg,
srb->sg_count);
+ srb->virt_addr = page_address(sl->page);
for (i = 0; i < srb->sg_count; i++) {
- u32 seglen = (u32)sg_dma_len(sl + i);
- sgp[i].address = (u32)sg_dma_address(sl + i);
+ u32 busaddr = (u32)sg_dma_address(&sl[i]);
+ u32 seglen = (u32)sl[i].length;
+ sgp[i].address = busaddr;
sgp[i].length = seglen;
srb->total_xfer_length += seglen;
- srb->virt_map[i] = kmap(sl[i].page);
}
- srb->virt_addr = srb->virt_map[0];
sgp += srb->sg_count - 1;
/*
int segment = cmd->use_sg;
u32 xferred = srb->total_xfer_length - left; /* bytes transferred */
struct SGentry *psge = srb->segment_x + srb->sg_index;
- void **virt = srb->virt_map;
dprintkdbg(DBG_0,
"sg_update_list: Transfered %i of %i bytes, %i remain\n",
/* We have to walk the scatterlist to find it */
sg = (struct scatterlist *)cmd->request_buffer;
- idx = 0;
while (segment--) {
unsigned long mask =
~((unsigned long)sg->length - 1) & PAGE_MASK;
if ((sg_dma_address(sg) & mask) == (psge->address & mask)) {
- srb->virt_addr = virt[idx] + (psge->address & ~PAGE_MASK);
+ srb->virt_addr = (page_address(sg->page)
+ + psge->address -
+ (psge->address & PAGE_MASK));
return;
}
++sg;
- ++idx;
}
dprintkl(KERN_ERR, "sg_update_list: sg_to_virt failed\n");
DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
}
/*
- * calculate all the residue data that not yet transfered
+ * calculate all the residue data not yet transferred
* SCSI transfer counter + left in SCSI FIFO data
*
* .....TRM_S1040_SCSI_COUNTER (24bits)
struct scsi_cmnd *cmd = srb->cmd;
enum dma_data_direction dir = cmd->sc_data_direction;
if (cmd->use_sg && dir != PCI_DMA_NONE) {
- int i;
/* unmap DC395x SG list */
dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
srb->sg_bus_addr, SEGMENTX_LEN);
dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
cmd->use_sg, cmd->request_buffer);
/* unmap the sg segments */
- for (i = 0; i < srb->sg_count; i++)
- kunmap(virt_to_page(srb->virt_map[i]));
pci_unmap_sg(acb->dev,
(struct scatterlist *)cmd->request_buffer,
cmd->use_sg, dir);
if (cmd->use_sg) {
struct scatterlist* sg = (struct scatterlist *)cmd->request_buffer;
- ptr = (struct ScsiInqData *)(srb->virt_map[0] + sg->offset);
+ ptr = (struct ScsiInqData *)(page_address(sg->page) + sg->offset);
} else {
ptr = (struct ScsiInqData *)(cmd->request_buffer);
}
const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
- kfree(acb->srb_array[i].segment_x);
-
- vfree(acb->srb_array[0].virt_map);
+ if (acb->srb_array[i].segment_x)
+ kfree(acb->srb_array[i].segment_x);
}
int srb_idx = 0;
unsigned i = 0;
struct SGentry *ptr;
- void **virt_array;
- for (i = 0; i < DC395x_MAX_SRB_CNT; i++) {
+ for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
acb->srb_array[i].segment_x = NULL;
- acb->srb_array[i].virt_map = NULL;
- }
dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
while (pages--) {
ptr + (i * DC395x_MAX_SG_LISTENTRY);
else
dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
-
- virt_array = vmalloc((DC395x_MAX_SRB_CNT + 1) * DC395x_MAX_SG_LISTENTRY * sizeof(void*));
-
- if (!virt_array) {
- adapter_sg_tables_free(acb);
- return 1;
- }
-
- for (i = 0; i < DC395x_MAX_SRB_CNT + 1; i++) {
- acb->srb_array[i].virt_map = virt_array;
- virt_array += DC395x_MAX_SG_LISTENTRY;
- }
-
return 0;
}
* spin_lock_irqsave(host_set lock)
*/
-
-
-/**
- * ata_sg_init_one - Prepare a one-entry scatter-gather list.
- * @qc: Queued command
- * @buf: transfer buffer
- * @buflen: length of buf
- *
- * Builds a single-entry scatter-gather list to initiate a
- * transfer utilizing the specified buffer.
- *
- * LOCKING:
- */
void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
struct scatterlist *sg;
* spin_lock_irqsave(host_set lock)
*/
-
-/**
- * ata_sg_init - Assign a scatter gather list to a queued command
- * @qc: Queued command
- * @sg: Scatter-gather list
- * @n_elem: length of sg list
- *
- * Attaches a scatter-gather list to a queued command.
- *
- * LOCKING:
- */
-
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem)
{
#define __LIBATA_H__
#define DRV_NAME "libata"
-#define DRV_VERSION "1.11" /* must be exactly four chars */
+#define DRV_VERSION "1.12" /* must be exactly four chars */
struct ata_scsi_args {
u16 *id;
#include "sata_promise.h"
#define DRV_NAME "sata_promise"
-#define DRV_VERSION "1.01"
+#define DRV_VERSION "1.02"
enum {
{
struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
+ s->private = it;
if (! it)
return NULL;
+
if (NULL == sg_dev_arr)
- goto err1;
+ return NULL;
it->index = *pos;
it->max = sg_last_dev();
if (it->index >= it->max)
- goto err1;
+ return NULL;
return it;
-err1:
- kfree(it);
- return NULL;
}
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+ struct sg_proc_deviter * it = s->private;
*pos = ++it->index;
return (it->index < it->max) ? it : NULL;
static void dev_seq_stop(struct seq_file *s, void *v)
{
- kfree (v);
+ kfree(s->private);
}
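The sg change above moves the iterator into seq_file->private so that ->stop() can release it unconditionally, instead of relying on the value ->start() happened to return. A self-contained, hypothetical seq_file iterator following the same convention (all example_* names and the item count are invented):

/* Illustrative sketch only -- not part of this patch. */
#include <linux/seq_file.h>
#include <linux/slab.h>

struct example_iter {
	int index;
	int max;
};

static void *example_seq_start(struct seq_file *s, loff_t *pos)
{
	struct example_iter *it = kmalloc(sizeof(*it), GFP_KERNEL);

	/* Park the iterator in s->private right away, so example_seq_stop()
	 * can kfree() it even if we bail out below and return NULL. */
	s->private = it;
	if (!it)
		return NULL;
	it->index = *pos;
	it->max = 16;				/* hypothetical item count */
	return it->index < it->max ? it : NULL;
}

static void *example_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct example_iter *it = s->private;

	*pos = ++it->index;
	return it->index < it->max ? it : NULL;
}

static void example_seq_stop(struct seq_file *s, void *v)
{
	kfree(s->private);			/* NULL-safe */
}

static int example_seq_show(struct seq_file *s, void *v)
{
	struct example_iter *it = v;

	seq_printf(s, "item %d\n", it->index);
	return 0;
}

static struct seq_operations example_seq_ops = {
	.start	= example_seq_start,
	.next	= example_seq_next,
	.stop	= example_seq_stop,
	.show	= example_seq_show,
};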
static int sg_proc_open_dev(struct inode *inode, struct file *file)
config SERIAL_AMBA_PL010
tristate "ARM AMBA PL010 serial port support"
- depends on ARM_AMBA
+ depends on ARM_AMBA && (BROKEN || !ARCH_VERSATILE)
select SERIAL_CORE
help
This selects the ARM(R) AMBA(R) PrimeCell PL010 UART. If you have
config SERIAL_M32R_PLDSIO
bool "M32R SIO I/F on a PLD"
- depends on SERIAL_M32R_SIO=y
+ depends on SERIAL_M32R_SIO=y && (PLAT_OPSPUT || PALT_USRV || PLAT_M32700UT)
default n
help
Say Y here if you want to use the M32R serial controller
return uart_set_options(port, co, baud, parity, bits, flow);
}
-extern struct uart_driver m32r_sio_reg;
+static struct uart_driver m32r_sio_reg;
static struct console m32r_sio_console = {
.name = "ttyS",
.write = m32r_sio_console_write,
return -1;
sal_console_port.sc_ops = &poll_ops;
+ spin_lock_init(&sal_console_port.sc_port.lock);
early_sn_setup(); /* Find SAL entry points */
register_console(&sal_console_early);
goto exit;
}
- x = le16_to_cpu(*(__le16 *) &data[2]);
- y = le16_to_cpu(*(__le16 *) &data[4]);
-
input_regs(dev, regs);
if (data[1] & 0x10) { /* in prox */
}
}
- if (data[1] & 0x80) {
+ if (data[1] & 0x90) {
+ x = le16_to_cpu(*(__le16 *) &data[2]);
+ y = le16_to_cpu(*(__le16 *) &data[4]);
input_report_abs(dev, ABS_X, x);
input_report_abs(dev, ABS_Y, y);
- }
- if (wacom->tool[0] != BTN_TOOL_MOUSE) {
- input_report_abs(dev, ABS_PRESSURE, le16_to_cpu(*(__le16 *) &data[6]));
- input_report_key(dev, BTN_TOUCH, data[1] & 0x01);
- input_report_key(dev, BTN_STYLUS, data[1] & 0x02);
- input_report_key(dev, BTN_STYLUS2, data[1] & 0x04);
+ if (wacom->tool[0] != BTN_TOOL_MOUSE) {
+ input_report_abs(dev, ABS_PRESSURE, le16_to_cpu(*(__le16 *) &data[6]));
+ input_report_key(dev, BTN_TOUCH, data[1] & 0x01);
+ input_report_key(dev, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(dev, BTN_STYLUS2, data[1] & 0x04);
+ }
}
input_report_key(dev, wacom->tool[0], data[1] & 0x10);
/* Cintiq doesn't send data when RDY bit isn't set */
if ((wacom->features->type == CINTIQ) && !(data[1] & 0x40))
- return;
+ goto exit;
if (wacom->features->type >= INTUOS3) {
input_report_abs(dev, ABS_X, (data[2] << 9) | (data[3] << 1) | ((data[9] >> 1) & 1));
* The USB Monitor, inspired by Dave Harding's USBMon.
*
* mon_main.c: Main file, module initiation and exit, registrations, etc.
+ *
+ * Copyright (C) 2005 Pete Zaitcev (zaitcev@redhat.com)
*/
#include <linux/kernel.h>
mondir = debugfs_create_dir("usbmon", NULL);
if (IS_ERR(mondir)) {
- printk(KERN_NOTICE TAG ": debugs is not available\n");
+ printk(KERN_NOTICE TAG ": debugfs is not available\n");
return -ENODEV;
}
if (mondir == NULL) {
/*
* The USB Monitor, inspired by Dave Harding's USBMon.
+ *
+ * Copyright (C) 2005 Pete Zaitcev (zaitcev@redhat.com)
*/
#ifndef __USB_MON_H
// copy the packet data to the new skb
memcpy(skb_put(gl_skb, size), packet->packet_data, size);
- skb_return (dev, skb);
+ skb_return (dev, gl_skb);
}
// advance to the next packet
if (datalen<14)
goto resubmit;
if ((seq & IEEE802_11_SCTL_FRAG) == 0) {
- frag = kmalloc(sizeof(struct zd1201_frag*),
- GFP_ATOMIC);
+ frag = kmalloc(sizeof(*frag), GFP_ATOMIC);
if (!frag)
goto resubmit;
skb = dev_alloc_skb(IEEE802_11_DATA_LEN +14+2);
config VGA_CONSOLE
bool "VGA text console" if EMBEDDED || !X86
- depends on !ARCH_ACORN && !ARCH_EBSA110 && !4xx && !8xx && !SPARC32 && !SPARC64 && !M68K && !PARISC
+ depends on !ARCH_ACORN && !ARCH_EBSA110 && !4xx && !8xx && !SPARC32 && !SPARC64 && !M68K && !PARISC && !ARCH_VERSATILE
default y
help
Saying Y here will allow you to use Linux in text mode through a
int
fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
{
- int err;
+ int err, flags = info->flags;
if (var->activate & FB_ACTIVATE_INV_MODE) {
struct fb_videomode mode1, mode2;
!list_empty(&info->modelist))
err = fb_add_videomode(&mode, &info->modelist);
- if (!err && info->flags & FBINFO_MISC_USEREVENT) {
+ if (!err && (flags & FBINFO_MISC_USEREVENT)) {
struct fb_event event;
info->flags &= ~FBINFO_MISC_USEREVENT;
return -ENODEV;
}
- /* Map the fb and MMIO regions */
- dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache
- (dinfo->aperture.physical, dinfo->aperture.size);
- if (!dinfo->aperture.virtual) {
- ERR_MSG("Cannot remap FB region.\n");
- cleanup(dinfo);
- return -ENODEV;
- }
- dinfo->mmio_base =
- (u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys,
- INTEL_REG_SIZE);
- if (!dinfo->mmio_base) {
- ERR_MSG("Cannot remap MMIO region.\n");
- cleanup(dinfo);
- return -ENODEV;
- }
-
/* Get the chipset info. */
dinfo->pci_chipset = pdev->device;
dinfo->accel = 0;
}
+ if (MB(voffset) < stolen_size)
+ offset = (stolen_size >> 12);
+ else
+ offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
+
/* Framebuffer parameters - Use all the stolen memory if >= vram */
- if (ROUND_UP_TO_PAGE(stolen_size) >= MB(vram)) {
+ if (ROUND_UP_TO_PAGE(stolen_size) >= ((offset << 12) + MB(vram))) {
dinfo->fb.size = ROUND_UP_TO_PAGE(stolen_size);
+ dinfo->fb.offset = 0;
dinfo->fbmem_gart = 0;
} else {
dinfo->fb.size = MB(vram);
return -ENODEV;
}
- if (MB(voffset) < stolen_size)
- offset = (stolen_size >> 12);
- else
- offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
-
/* set the mem offsets - set them after the already used pages */
if (dinfo->accel) {
dinfo->ring.offset = offset + gtt_info.current_memory;
+ (dinfo->cursor.size >> 12);
}
+ /* Map the fb and MMIO regions */
+ /* ioremap only up to the end of used aperture */
+ dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache
+ (dinfo->aperture.physical, (dinfo->fb.offset << 12)
+ + dinfo->fb.size);
+ if (!dinfo->aperture.virtual) {
+ ERR_MSG("Cannot remap FB region.\n");
+ cleanup(dinfo);
+ return -ENODEV;
+ }
+
+ dinfo->mmio_base =
+ (u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys,
+ INTEL_REG_SIZE);
+ if (!dinfo->mmio_base) {
+ ERR_MSG("Cannot remap MMIO region.\n");
+ cleanup(dinfo);
+ return -ENODEV;
+ }
+
/* Allocate memories (which aren't stolen) */
if (dinfo->accel) {
if (!(dinfo->gtt_ring_mem =
#include <video/radeon.h>
#include <linux/radeonfb.h>
-#define DEBUG 1
+#define DEBUG 0
#if DEBUG
#define RTRACE printk
* Return 0 - device(s) present, 1 - no devices present.
*/
if (w1_reset_bus(dev)) {
- dev_info(&dev->dev, "No devices present on the wire.\n");
+ dev_dbg(&dev->dev, "No devices present on the wire.\n");
break;
}
struct dentry *dentry,
struct nameidata *nd);
static int afs_mntpt_open(struct inode *inode, struct file *file);
-static int afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd);
+static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd);
struct file_operations afs_mntpt_file_operations = {
.open = afs_mntpt_open,
/*
* follow a link from a mountpoint directory, thus causing it to be mounted
*/
-static int afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct vfsmount *newmnt;
struct dentry *old_dentry;
newmnt = afs_mntpt_do_automount(dentry);
if (IS_ERR(newmnt)) {
path_release(nd);
- return PTR_ERR(newmnt);
+ return (void *)newmnt;
}
old_dentry = nd->dentry;
}
kleave(" = %d", err);
- return err;
+ return ERR_PTR(err);
} /* end afs_mntpt_follow_link() */
/*****************************************************************************/
#include "autofs_i.h"
-static int autofs_follow_link(struct dentry *dentry, struct nameidata *nd)
+/* Nothing to release.. */
+static void *autofs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
char *s=((struct autofs_symlink *)dentry->d_inode->u.generic_ip)->data;
nd_set_link(nd, s);
- return 0;
+ return NULL;
}
struct inode_operations autofs_symlink_inode_operations = {
#include "autofs_i.h"
-static int autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct autofs_info *ino = autofs4_dentry_ino(dentry);
nd_set_link(nd, (char *)ino->u.symlink);
- return 0;
+ return NULL;
}
struct inode_operations autofs4_symlink_inode_operations = {
static void befs_destroy_inode(struct inode *inode);
static int befs_init_inodecache(void);
static void befs_destroy_inodecache(void);
-static int befs_follow_link(struct dentry *, struct nameidata *);
-static void befs_put_link(struct dentry *, struct nameidata *);
+static void *befs_follow_link(struct dentry *, struct nameidata *);
+static void befs_put_link(struct dentry *, struct nameidata *, void *);
static int befs_utf2nls(struct super_block *sb, const char *in, int in_len,
char **out, int *out_len);
static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
* The data stream become link name. Unless the LONG_SYMLINK
* flag is set.
*/
-static int
+static void *
befs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
}
nd_set_link(nd, link);
- return 0;
+ return NULL;
}
-static void befs_put_link(struct dentry *dentry, struct nameidata *nd)
+static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
{
befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
+Version 1.35
+------------
+Add writepage performance improvements. Fix path name conversions
+for long filenames on mounts made with the "mapchars" mount option.
+
Version 1.34
------------
Fix error mapping of the TOO_MANY_LINKS (hardlinks) case.
extern struct dentry_operations cifs_dentry_ops;
/* Functions related to symlinks */
-extern int cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
-extern void cifs_put_link(struct dentry *direntry, struct nameidata *nd);
+extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
+extern void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *);
extern int cifs_readlink(struct dentry *direntry, char __user *buffer,
int buflen);
extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
if(name_len < PATH_MAX) {
memcpy(pSMB->ResumeFileName, psrch_inf->presume_name, name_len);
byte_count += name_len;
+ /* 14 byte parm len above enough for 2 byte null terminator */
+ pSMB->ResumeFileName[name_len] = 0;
+ pSMB->ResumeFileName[name_len+1] = 0;
} else {
rc = -EINVAL;
goto FNext2_err_exit;
netfid, length,
pfLock->fl_start, numUnlock, numLock, lockType,
wait_flag);
- if (rc == 0 && (pfLock->fl_flags & FL_POSIX))
+ if (pfLock->fl_flags & FL_POSIX)
posix_lock_file_wait(file, pfLock);
FreeXid(xid);
return rc;
return rc;
}
-int
+void *
cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
{
struct inode *inode = direntry->d_inode;
out_no_free:
FreeXid(xid);
nd_set_link(nd, target_path);
- return 0;
+ return NULL; /* No cookie */
}
int
return rc;
}
-void cifs_put_link(struct dentry *direntry, struct nameidata *nd)
+void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
{
char *p = nd_get_link(nd);
if (!IS_ERR(p))
src_char = source[i];
switch (src_char) {
case 0:
+ target[j] = 0;
goto ctoUCS_out;
case ':':
target[j] = cpu_to_le16(UNI_COLON);
return 0;
} /* End Function devfs_mknod */
-static int devfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *devfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct devfs_entry *p = get_devfs_entry_from_vfs_inode(dentry->d_inode);
nd_set_link(nd, p ? p->u.symlink.linkname : ERR_PTR(-ENODEV));
- return 0;
+ return NULL;
} /* End Function devfs_follow_link */
static struct inode_operations devfs_iops = {
#include "xattr.h"
#include <linux/namei.h>
-static int ext2_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *ext2_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct ext2_inode_info *ei = EXT2_I(dentry->d_inode);
nd_set_link(nd, (char *)ei->i_data);
- return 0;
+ return NULL;
}
struct inode_operations ext2_symlink_inode_operations = {
#include <linux/namei.h>
#include "xattr.h"
-static int ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void * ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct ext3_inode_info *ei = EXT3_I(dentry->d_inode);
nd_set_link(nd, (char*)ei->i_data);
- return 0;
+ return NULL;
}
struct inode_operations ext3_symlink_inode_operations = {
#include "vxfs_inode.h"
-static int vxfs_immed_follow_link(struct dentry *, struct nameidata *);
+static void * vxfs_immed_follow_link(struct dentry *, struct nameidata *);
static int vxfs_immed_readpage(struct file *, struct page *);
* Returns:
* Zero on success, else a negative error code.
*/
-static int
+static void *
vxfs_immed_follow_link(struct dentry *dp, struct nameidata *np)
{
struct vxfs_inode_info *vip = VXFS_INO(dp->d_inode);
nd_set_link(np, vip->vii_immed.vi_immed);
- return 0;
+ return NULL;
}
/**
static inline struct hppfs_inode_info *HPPFS_I(struct inode *inode)
{
- return(list_entry(inode, struct hppfs_inode_info, vfs_inode));
+ return container_of(inode, struct hppfs_inode_info, vfs_inode);
}
#define HPPFS_SUPER_MAGIC 0xb00000ee
{
struct file *proc_file;
struct dentry *proc_dentry;
- int (*readlink)(struct dentry *, char *, int);
- int err, n;
+ int ret;
proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
proc_file = dentry_open(dget(proc_dentry), NULL, O_RDONLY);
- err = PTR_ERR(proc_dentry);
- if(IS_ERR(proc_dentry))
- return(err);
+ if (IS_ERR(proc_file))
+ return PTR_ERR(proc_file);
- readlink = proc_dentry->d_inode->i_op->readlink;
- n = (*readlink)(proc_dentry, buffer, buflen);
+ ret = proc_dentry->d_inode->i_op->readlink(proc_dentry, buffer, buflen);
fput(proc_file);
- return(n);
+ return ret;
}
-static int hppfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void* hppfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct file *proc_file;
struct dentry *proc_dentry;
- int (*follow_link)(struct dentry *, struct nameidata *);
- int err, n;
+ void *ret;
proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
proc_file = dentry_open(dget(proc_dentry), NULL, O_RDONLY);
- err = PTR_ERR(proc_dentry);
- if(IS_ERR(proc_dentry))
- return(err);
+ if (IS_ERR(proc_file))
+ return proc_file;
- follow_link = proc_dentry->d_inode->i_op->follow_link;
- n = (*follow_link)(proc_dentry, nd);
+ ret = proc_dentry->d_inode->i_op->follow_link(proc_dentry, nd);
fput(proc_file);
- return(n);
+ return ret;
}
static struct inode_operations hppfs_dir_iops = {
do {
if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
return -ENOSPC;
- ret = idr_get_new_above(&dev->idr, watch, dev->last_wd, &watch->wd);
+ ret = idr_get_new_above(&dev->idr, watch, dev->last_wd+1, &watch->wd);
} while (ret == -EAGAIN);
return ret;
return ERR_PTR(ret);
}
- dev->last_wd = ret;
+ dev->last_wd = watch->wd;
watch->mask = mask;
atomic_set(&watch->count, 0);
INIT_LIST_HEAD(&watch->d_list);
break;
case IOPRIO_CLASS_IDLE:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
break;
default:
return -EINVAL;
#include <linux/namei.h>
#include "nodelist.h"
-static int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd);
+static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd);
struct inode_operations jffs2_symlink_inode_operations =
{
.setattr = jffs2_setattr
};
-static int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode);
+ char *p = (char *)f->dents;
/*
* We don't acquire the f->sem mutex here since the only data we
* nd_set_link() call.
*/
- if (!f->dents) {
+ if (!p) {
printk(KERN_ERR "jffs2_follow_link(): can't find symlink taerget\n");
- return -EIO;
+ p = ERR_PTR(-EIO);
+ } else {
+ D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->dents));
}
- D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->dents));
- nd_set_link(nd, (char *)f->dents);
+ nd_set_link(nd, p);
/*
* We unlock the f->sem mutex but VFS will use the f->dents string. This is safe
* since the only way that may cause f->dents to be changed is iput() operation.
* But VFS will not use f->dents after iput() has been called.
*/
- return 0;
+ return NULL;
}
{
jfs_info("In jfs_delete_inode, inode = 0x%p", inode);
+ if (is_bad_inode(inode) ||
+ (JFS_IP(inode)->fileset != cpu_to_le32(FILESYSTEM_I)))
+ return;
+
if (test_cflag(COMMIT_Freewmap, inode))
jfs_free_zero_link(inode);
static bio_end_io_t lbmIODone;
static void lbmStartIO(struct lbuf * bp);
static void lmGCwrite(struct jfs_log * log, int cant_block);
-static int lmLogSync(struct jfs_log * log, int nosyncwait);
+static int lmLogSync(struct jfs_log * log, int hard_sync);
* if new sync address is available
* (normally the case if sync() is executed by background
* process).
- * if not, explicitly run jfs_blogsync() to initiate
- * getting of new sync address.
* calculate new value of i_nextsync which determines when
* this code is called again.
*
* PARAMETERS: log - log structure
- * nosyncwait - 1 if called asynchronously
+ * hard_sync - 1 to force all metadata to be written
*
* RETURN: 0
*
* serialization: LOG_LOCK() held on entry/exit
*/
-static int lmLogSync(struct jfs_log * log, int nosyncwait)
+static int lmLogSync(struct jfs_log * log, int hard_sync)
{
int logsize;
int written; /* written since last syncpt */
unsigned long flags;
/* push dirty metapages out to disk */
- list_for_each_entry(sbi, &log->sb_list, log_list) {
- filemap_flush(sbi->ipbmap->i_mapping);
- filemap_flush(sbi->ipimap->i_mapping);
- filemap_flush(sbi->direct_inode->i_mapping);
- }
+ if (hard_sync)
+ list_for_each_entry(sbi, &log->sb_list, log_list) {
+ filemap_fdatawrite(sbi->ipbmap->i_mapping);
+ filemap_fdatawrite(sbi->ipimap->i_mapping);
+ filemap_fdatawrite(sbi->direct_inode->i_mapping);
+ }
+ else
+ list_for_each_entry(sbi, &log->sb_list, log_list) {
+ filemap_flush(sbi->ipbmap->i_mapping);
+ filemap_flush(sbi->ipimap->i_mapping);
+ filemap_flush(sbi->direct_inode->i_mapping);
+ }
/*
* forward syncpt
/* next syncpt trigger = written + more */
log->nextsync = written + more;
- /* return if lmLogSync() from outside of transaction, e.g., sync() */
- if (nosyncwait)
- return lsn;
-
/* if number of bytes written from last sync point is more
* than 1/4 of the log size, stop new transactions from
* starting until all current transactions are completed
*
* FUNCTION: write log SYNCPT record for specified log
*
- * PARAMETERS: log - log structure
+ * PARAMETERS: log - log structure
+ * hard_sync - set to 1 to force metadata to be written
*/
-void jfs_syncpt(struct jfs_log *log)
+void jfs_syncpt(struct jfs_log *log, int hard_sync)
{ LOG_LOCK(log);
- lmLogSync(log, 1);
+ lmLogSync(log, hard_sync);
LOG_UNLOCK(log);
}
extern int lmGroupCommit(struct jfs_log *, struct tblock *);
extern int jfsIOWait(void *);
extern void jfs_flush_journal(struct jfs_log * log, int wait);
-extern void jfs_syncpt(struct jfs_log *log);
+extern void jfs_syncpt(struct jfs_log *log, int hard_sync);
#endif /* _H_JFS_LOGMGR */
* synchronize with logsync barrier
*/
if (test_bit(log_SYNCBARRIER, &log->flag)) {
+ TXN_UNLOCK();
+
+ /* write dirty metadata & forward log syncpt */
+ jfs_syncpt(log, 1);
+
jfs_info("log barrier off: 0x%x", log->lsn);
/* enable new transactions start */
/* wakeup all waitors for logsync barrier */
TXN_WAKEUP(&log->syncwait);
- TXN_UNLOCK();
-
- /* forward log syncpt */
- jfs_syncpt(log);
-
goto wakeup;
}
}
/* only anonymous txn.
* Remove from anon_list
*/
+ TXN_LOCK();
list_del_init(&jfs_ip->anon_inode_list);
+ TXN_UNLOCK();
}
jfs_ip->atlhead = tlck->next;
} else {
{
struct jfs_inode_info *ji = JFS_IP(inode);
+ BUG_ON(!list_empty(&ji->anon_inode_list));
+
spin_lock_irq(&ji->ag_lock);
if (ji->active_ag != -1) {
struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
/* log == NULL indicates read-only mount */
if (log) {
jfs_flush_journal(log, wait);
- jfs_syncpt(log);
+ jfs_syncpt(log, 0);
}
return 0;
#include "jfs_inode.h"
#include "jfs_xattr.h"
-static int jfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *jfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
char *s = JFS_IP(dentry->d_inode)->i_inline;
nd_set_link(nd, s);
- return 0;
+ return NULL;
}
struct inode_operations jfs_symlink_inode_operations = {
static inline int __do_follow_link(struct path *path, struct nameidata *nd)
{
int error;
+ void *cookie;
struct dentry *dentry = path->dentry;
touch_atime(path->mnt, dentry);
if (path->mnt == nd->mnt)
mntget(path->mnt);
- error = dentry->d_inode->i_op->follow_link(dentry, nd);
- if (!error) {
+ cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
+ error = PTR_ERR(cookie);
+ if (!IS_ERR(cookie)) {
char *s = nd_get_link(nd);
+ error = 0;
if (s)
error = __vfs_follow_link(nd, s);
if (dentry->d_inode->i_op->put_link)
- dentry->d_inode->i_op->put_link(dentry, nd);
+ dentry->d_inode->i_op->put_link(dentry, nd, cookie);
}
dput(dentry);
mntput(path->mnt);
error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
if (!error) {
const char *new_name = old_dentry->d_name.name;
- fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir, new_dentry->d_inode);
+ fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir,
+ new_dentry->d_inode, old_dentry->d_inode);
}
fsnotify_oldname_free(old_name);
int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
struct nameidata nd;
- int res;
+ void *cookie;
+
nd.depth = 0;
- res = dentry->d_inode->i_op->follow_link(dentry, &nd);
- if (!res) {
- res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
+ cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
+ if (!IS_ERR(cookie)) {
+ int res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
if (dentry->d_inode->i_op->put_link)
- dentry->d_inode->i_op->put_link(dentry, &nd);
+ dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
+ cookie = ERR_PTR(res);
}
- return res;
+ return PTR_ERR(cookie);
}
int vfs_follow_link(struct nameidata *nd, const char *link)
return res;
}
-int page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
+void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
{
- struct page *page;
+ struct page *page = NULL;
nd_set_link(nd, page_getlink(dentry, &page));
- return 0;
+ return page;
}
-void page_put_link(struct dentry *dentry, struct nameidata *nd)
+void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
- if (!IS_ERR(nd_get_link(nd))) {
- struct page *page;
- page = find_get_page(dentry->d_inode->i_mapping, 0);
- if (!page)
- BUG();
+ struct page *page = cookie;
+
+ if (page) {
kunmap(page);
page_cache_release(page);
- page_cache_release(page);
}
}
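The page_follow_link_light()/page_put_link() pair above illustrates the interface change running through this patch: ->follow_link() now returns an opaque void * cookie (or an ERR_PTR), and the VFS hands that cookie back to ->put_link() (see __do_follow_link() and generic_readlink() earlier). A hypothetical filesystem that has to allocate a temporary buffer for the link text would use the new prototypes roughly as follows; the example_* names, including example_read_link_text(), are invented for illustration:

/* Illustrative sketch only -- not part of this patch. */
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/limits.h>

static void *example_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);

	if (!buf)
		return ERR_PTR(-ENOMEM);	/* VFS skips put_link on errors */
	example_read_link_text(dentry->d_inode, buf, PATH_MAX); /* hypothetical */
	nd_set_link(nd, buf);
	return buf;			/* cookie handed back to put_link() */
}

static void example_put_link(struct dentry *dentry, struct nameidata *nd,
			     void *cookie)
{
	kfree(cookie);			/* release whatever follow_link set up */
}

static struct inode_operations example_symlink_iops = {
	.readlink	= generic_readlink,
	.follow_link	= example_follow_link,
	.put_link	= example_put_link,
};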
/* We requested READDIRPLUS, but the server doesn't grok it */
if (error == -ENOTSUPP && desc->plus) {
NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS;
- NFS_FLAGS(inode) &= ~NFS_INO_ADVISE_RDPLUS;
+ clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
desc->plus = 0;
goto again;
}
goto error;
}
SetPageUptodate(page);
- NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME;
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
+ spin_unlock(&inode->i_lock);
/* Ensure consistent page alignment of the data.
* Note: assumes we have exclusive access to this mapping either
* through inode->i_sem or some other mechanism.
page,
NFS_SERVER(inode)->dtsize,
desc->plus);
- NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME;
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
+ spin_unlock(&inode->i_lock);
desc->page = page;
desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */
if (desc->error >= 0) {
break;
}
if (res == -ETOOSMALL && desc->plus) {
- NFS_FLAGS(inode) &= ~NFS_INO_ADVISE_RDPLUS;
+ clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
nfs_zap_caches(inode);
desc->plus = 0;
desc->entry->eof = 0;
{
if (IS_ROOT(dentry))
return 1;
- if ((NFS_FLAGS(dir) & NFS_INO_INVALID_ATTR) != 0
+ if ((NFS_I(dir)->cache_validity & NFS_INO_INVALID_ATTR) != 0
|| nfs_attribute_timeout(dir))
return 0;
return nfs_verify_change_attribute(dir, (unsigned long)dentry->d_fsdata);
error = nfs_revalidate_inode(NFS_SERVER(dir), dir);
if (error < 0) {
res = ERR_PTR(error);
+ unlock_kernel();
goto out;
}
int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, struct nfs_access_entry *res)
{
- struct nfs_access_entry *cache = &NFS_I(inode)->cache_access;
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_access_entry *cache = &nfsi->cache_access;
if (cache->cred != cred
|| time_after(jiffies, cache->jiffies + NFS_ATTRTIMEO(inode))
- || (NFS_FLAGS(inode) & NFS_INO_INVALID_ACCESS))
+ || (nfsi->cache_validity & NFS_INO_INVALID_ACCESS))
return -ENOENT;
memcpy(res, cache, sizeof(*res));
return 0;
void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
{
- struct nfs_access_entry *cache = &NFS_I(inode)->cache_access;
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_access_entry *cache = &nfsi->cache_access;
if (cache->cred != set->cred) {
if (cache->cred)
put_rpccred(cache->cred);
cache->cred = get_rpccred(set->cred);
}
- NFS_FLAGS(inode) &= ~NFS_INO_INVALID_ACCESS;
+ /* FIXME: replace current access_cache BKL reliance with inode->i_lock */
+ spin_lock(&inode->i_lock);
+ nfsi->cache_validity &= ~NFS_INO_INVALID_ACCESS;
+ spin_unlock(&inode->i_lock);
cache->jiffies = set->jiffies;
cache->mask = set->mask;
}
*/
static int nfs_revalidate_file(struct inode *inode, struct file *filp)
{
+ struct nfs_inode *nfsi = NFS_I(inode);
int retval = 0;
- if ((NFS_FLAGS(inode) & NFS_INO_REVAL_PAGECACHE) || nfs_attribute_timeout(inode))
+ if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) || nfs_attribute_timeout(inode))
retval = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
nfs_revalidate_mapping(inode, filp->f_mapping);
return 0;
goto force_reval;
if (nfsi->npages != 0)
return 0;
- if (!(NFS_FLAGS(inode) & NFS_INO_REVAL_PAGECACHE) && !nfs_attribute_timeout(inode))
+ if (!(nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) && !nfs_attribute_timeout(inode))
return 0;
force_reval:
return __nfs_revalidate_inode(server, inode);
struct nfs_inode *nfsi = NFS_I(inode);
int mode = inode->i_mode;
+ spin_lock(&inode->i_lock);
+
NFS_ATTRTIMEO(inode) = NFS_MINATTRTIMEO(inode);
NFS_ATTRTIMEO_UPDATE(inode) = jiffies;
memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
- nfsi->flags |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
else
- nfsi->flags |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
+
+ spin_unlock(&inode->i_lock);
}
static void nfs_zap_acl_cache(struct inode *inode)
clear_acl_cache = NFS_PROTO(inode)->clear_acl_cache;
if (clear_acl_cache != NULL)
clear_acl_cache(inode);
- NFS_I(inode)->flags &= ~NFS_INO_INVALID_ACL;
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_ACL;
+ spin_unlock(&inode->i_lock);
}
/*
inode->i_fop = &nfs_dir_operations;
if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS)
&& fattr->size <= NFS_LIMIT_READDIRPLUS)
- NFS_FLAGS(inode) |= NFS_INO_ADVISE_RDPLUS;
+ set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
} else if (S_ISLNK(inode->i_mode))
inode->i_op = &nfs_symlink_inode_operations;
else
nfs_wb_all(inode);
}
error = NFS_PROTO(inode)->setattr(dentry, &fattr, attr);
- if (error == 0) {
+ if (error == 0)
nfs_refresh_inode(inode, &fattr);
+ nfs_end_data_update(inode);
+ unlock_kernel();
+ return error;
+}
+
+/**
+ * nfs_setattr_update_inode - Update inode metadata after a setattr call.
+ * @inode: pointer to struct inode
+ * @attr: pointer to struct iattr
+ *
+ * Note: we do this in the *proc.c in order to ensure that
+ * it works for things like exclusive creates too.
+ */
+void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
+{
+ if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) {
if ((attr->ia_valid & ATTR_MODE) != 0) {
- int mode;
- mode = inode->i_mode & ~S_IALLUGO;
- mode |= attr->ia_mode & S_IALLUGO;
+ int mode = attr->ia_mode & S_IALLUGO;
+ mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
}
if ((attr->ia_valid & ATTR_UID) != 0)
inode->i_uid = attr->ia_uid;
if ((attr->ia_valid & ATTR_GID) != 0)
inode->i_gid = attr->ia_gid;
- if ((attr->ia_valid & ATTR_SIZE) != 0) {
- inode->i_size = attr->ia_size;
- vmtruncate(inode, attr->ia_size);
- }
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ spin_unlock(&inode->i_lock);
}
- if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
- NFS_FLAGS(inode) |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
- nfs_end_data_update(inode);
- unlock_kernel();
- return error;
+ if ((attr->ia_valid & ATTR_SIZE) != 0) {
+ inode->i_size = attr->ia_size;
+ vmtruncate(inode, attr->ia_size);
+ }
+}
+
+static int nfs_wait_schedule(void *word)
+{
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ schedule();
+ return 0;
}
/*
* Wait for the inode to get unlocked.
- * (Used for NFS_INO_LOCKED and NFS_INO_REVALIDATING).
*/
-static int
-nfs_wait_on_inode(struct inode *inode, int flag)
+static int nfs_wait_on_inode(struct inode *inode)
{
struct rpc_clnt *clnt = NFS_CLIENT(inode);
struct nfs_inode *nfsi = NFS_I(inode);
-
+ sigset_t oldmask;
int error;
- if (!(NFS_FLAGS(inode) & flag))
- return 0;
+
atomic_inc(&inode->i_count);
- error = nfs_wait_event(clnt, nfsi->nfs_i_wait,
- !(NFS_FLAGS(inode) & flag));
+ rpc_clnt_sigmask(clnt, &oldmask);
+ error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING,
+ nfs_wait_schedule, TASK_INTERRUPTIBLE);
+ rpc_clnt_sigunmask(clnt, &oldmask);
iput(inode);
+
return error;
}
+static void nfs_wake_up_inode(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ clear_bit(NFS_INO_REVALIDATING, &nfsi->flags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&nfsi->flags, NFS_INO_REVALIDATING);
+}
+
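nfs_wait_on_inode()/nfs_wake_up_inode() above replace the old open-coded NFS_INO_REVALIDATING handling with the generic bit-wait primitives: wait_on_bit_lock() sleeps (via the supplied action callback) until it owns the bit, and the owner releases it by clearing the bit, issuing the barrier, and waking the bit waitqueue. A stripped-down, hypothetical illustration of that idiom on a plain flags word (the example_* names and bit number are invented):

/* Illustrative sketch only -- not part of this patch. */
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#define EXAMPLE_BUSY	0		/* hypothetical bit number in 'flags' */

static int example_wait_action(void *word)
{
	schedule();			/* uninterruptible flavour, for brevity */
	return 0;
}

static int example_lock(unsigned long *flags)
{
	/* Sleeps until this caller owns EXAMPLE_BUSY. */
	return wait_on_bit_lock(flags, EXAMPLE_BUSY,
				example_wait_action, TASK_UNINTERRUPTIBLE);
}

static void example_unlock(unsigned long *flags)
{
	clear_bit(EXAMPLE_BUSY, flags);
	smp_mb__after_clear_bit();	/* order the clear before the wake-up */
	wake_up_bit(flags, EXAMPLE_BUSY);
}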
int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
struct inode *inode = dentry->d_inode;
- struct nfs_inode *nfsi = NFS_I(inode);
- int need_atime = nfsi->flags & NFS_INO_INVALID_ATIME;
+ int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
int err;
if (__IS_FLG(inode, MS_NOATIME))
struct nfs_fattr fattr;
struct nfs_inode *nfsi = NFS_I(inode);
unsigned long verifier;
- unsigned int flags;
+ unsigned long cache_validity;
dfprintk(PAGECACHE, "NFS: revalidating (%s/%Ld)\n",
inode->i_sb->s_id, (long long)NFS_FILEID(inode));
if (NFS_STALE(inode))
goto out_nowait;
- while (NFS_REVALIDATING(inode)) {
- status = nfs_wait_on_inode(inode, NFS_INO_REVALIDATING);
- if (status < 0)
- goto out_nowait;
- if (NFS_ATTRTIMEO(inode) == 0)
- continue;
- if (NFS_FLAGS(inode) & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ATIME))
- continue;
- status = NFS_STALE(inode) ? -ESTALE : 0;
- goto out_nowait;
+ status = nfs_wait_on_inode(inode);
+ if (status < 0)
+ goto out;
+ if (NFS_STALE(inode)) {
+ status = -ESTALE;
+ /* Do we trust the cached ESTALE? */
+ if (NFS_ATTRTIMEO(inode) != 0) {
+ if (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ATIME)) {
+ /* no */
+ } else
+ goto out;
+ }
}
- NFS_FLAGS(inode) |= NFS_INO_REVALIDATING;
/* Protect against RPC races by saving the change attribute */
verifier = nfs_save_change_attribute(inode);
if (status == -ESTALE) {
nfs_zap_caches(inode);
if (!S_ISDIR(inode->i_mode))
- NFS_FLAGS(inode) |= NFS_INO_STALE;
+ set_bit(NFS_INO_STALE, &NFS_FLAGS(inode));
}
goto out;
}
(long long)NFS_FILEID(inode), status);
goto out;
}
- flags = nfsi->flags;
- nfsi->flags &= ~NFS_INO_REVAL_PAGECACHE;
+ spin_lock(&inode->i_lock);
+ cache_validity = nfsi->cache_validity;
+ nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
+
/*
* We may need to keep the attributes marked as invalid if
* we raced with nfs_end_attr_update().
*/
if (verifier == nfsi->cache_change_attribute)
- nfsi->flags &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME);
- /* Do the page cache invalidation */
+ nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME);
+ spin_unlock(&inode->i_lock);
+
nfs_revalidate_mapping(inode, inode->i_mapping);
- if (flags & NFS_INO_INVALID_ACL)
+
+ if (cache_validity & NFS_INO_INVALID_ACL)
nfs_zap_acl_cache(inode);
+
dfprintk(PAGECACHE, "NFS: (%s/%Ld) revalidation complete\n",
inode->i_sb->s_id,
(long long)NFS_FILEID(inode));
-out:
- NFS_FLAGS(inode) &= ~NFS_INO_REVALIDATING;
- wake_up(&nfsi->nfs_i_wait);
+ out:
+ nfs_wake_up_inode(inode);
+
out_nowait:
unlock_kernel();
return status;
*/
int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
{
- if (!(NFS_FLAGS(inode) & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))
+ if (!(NFS_I(inode)->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))
&& !nfs_attribute_timeout(inode))
return NFS_STALE(inode) ? -ESTALE : 0;
return __nfs_revalidate_inode(server, inode);
{
struct nfs_inode *nfsi = NFS_I(inode);
- if (nfsi->flags & NFS_INO_INVALID_DATA) {
+ if (nfsi->cache_validity & NFS_INO_INVALID_DATA) {
if (S_ISREG(inode->i_mode)) {
if (filemap_fdatawrite(mapping) == 0)
filemap_fdatawait(mapping);
nfs_wb_all(inode);
}
invalidate_inode_pages2(mapping);
- nfsi->flags &= ~NFS_INO_INVALID_DATA;
+
+ spin_lock(&inode->i_lock);
+ nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
if (S_ISDIR(inode->i_mode)) {
memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
/* This ensures we revalidate child dentries */
nfsi->cache_change_attribute++;
}
+ spin_unlock(&inode->i_lock);
+
dfprintk(PAGECACHE, "NFS: (%s/%Ld) data cache invalidated\n",
inode->i_sb->s_id,
(long long)NFS_FILEID(inode));
if (!nfs_have_delegation(inode, FMODE_READ)) {
/* Mark the attribute cache for revalidation */
- nfsi->flags |= NFS_INO_INVALID_ATTR;
+ spin_lock(&inode->i_lock);
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
/* Directories and symlinks: invalidate page cache too */
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
- nfsi->flags |= NFS_INO_INVALID_DATA;
+ nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+ spin_unlock(&inode->i_lock);
}
nfsi->cache_change_attribute ++;
atomic_dec(&nfsi->data_updates);
if (nfs_have_delegation(inode, FMODE_READ))
return 0;
+ spin_lock(&inode->i_lock);
+
/* Are we in the process of updating data on the server? */
data_unstable = nfs_caches_unstable(inode);
&& nfsi->change_attr == fattr->pre_change_attr)
nfsi->change_attr = fattr->change_attr;
if (nfsi->change_attr != fattr->change_attr) {
- nfsi->flags |= NFS_INO_INVALID_ATTR;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (!data_unstable)
- nfsi->flags |= NFS_INO_REVAL_PAGECACHE;
+ nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
}
}
- if ((fattr->valid & NFS_ATTR_FATTR) == 0)
+ if ((fattr->valid & NFS_ATTR_FATTR) == 0) {
+ spin_unlock(&inode->i_lock);
return 0;
+ }
/* Has the inode gone and changed behind our back? */
if (nfsi->fileid != fattr->fileid
- || (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
+ || (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) {
+ spin_unlock(&inode->i_lock);
return -EIO;
+ }
cur_size = i_size_read(inode);
new_isize = nfs_size_to_loff_t(fattr->size);
/* Verify a few of the more important attributes */
if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) {
- nfsi->flags |= NFS_INO_INVALID_ATTR;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (!data_unstable)
- nfsi->flags |= NFS_INO_REVAL_PAGECACHE;
+ nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
}
if (cur_size != new_isize) {
- nfsi->flags |= NFS_INO_INVALID_ATTR;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (nfsi->npages == 0)
- nfsi->flags |= NFS_INO_REVAL_PAGECACHE;
+ nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
}
/* Have any file permissions changed? */
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)
|| inode->i_uid != fattr->uid
|| inode->i_gid != fattr->gid)
- nfsi->flags |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
/* Has the link count changed? */
if (inode->i_nlink != fattr->nlink)
- nfsi->flags |= NFS_INO_INVALID_ATTR;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (!timespec_equal(&inode->i_atime, &fattr->atime))
- nfsi->flags |= NFS_INO_INVALID_ATIME;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATIME;
nfsi->read_cache_jiffies = fattr->timestamp;
+ spin_unlock(&inode->i_lock);
return 0;
}
goto out_err;
}
+ spin_lock(&inode->i_lock);
+
/*
* Make sure the inode's type hasn't changed.
*/
- if ((inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
+ if ((inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) {
+ spin_unlock(&inode->i_lock);
goto out_changed;
+ }
/*
* Update the read time so we don't revalidate too often.
|| S_ISLNK(inode->i_mode)))
invalid &= ~NFS_INO_INVALID_DATA;
if (!nfs_have_delegation(inode, FMODE_READ))
- nfsi->flags |= invalid;
+ nfsi->cache_validity |= invalid;
+ spin_unlock(&inode->i_lock);
return 0;
out_changed:
/*
*/
nfs_invalidate_inode(inode);
out_err:
- NFS_FLAGS(inode) |= NFS_INO_STALE;
+ set_bit(NFS_INO_STALE, &NFS_FLAGS(inode));
return -ESTALE;
}
nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, SLAB_KERNEL);
if (!nfsi)
return NULL;
- nfsi->flags = 0;
+ nfsi->flags = 0UL;
+ nfsi->cache_validity = 0UL;
#ifdef CONFIG_NFS_V3_ACL
nfsi->acl_access = ERR_PTR(-EAGAIN);
nfsi->acl_default = ERR_PTR(-EAGAIN);
nfsi->ndirty = 0;
nfsi->ncommit = 0;
nfsi->npages = 0;
- init_waitqueue_head(&nfsi->nfs_i_wait);
nfs4_init_once(nfsi);
}
}
nfs_begin_data_update(inode);
status = rpc_call(server->client_acl, ACLPROC3_SETACL,
&args, &fattr, 0);
- NFS_FLAGS(inode) |= NFS_INO_INVALID_ACCESS;
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS;
+ spin_unlock(&inode->i_lock);
nfs_end_data_update(inode);
dprintk("NFS reply setacl: %d\n", status);
dprintk("NFS call setattr\n");
fattr->valid = 0;
status = rpc_call(NFS_CLIENT(inode), NFS3PROC_SETATTR, &arg, fattr, 0);
+ if (status == 0)
+ nfs_setattr_update_inode(inode, sattr);
dprintk("NFS reply setattr: %d\n", status);
return status;
}
* not sure this buys us anything (and I'd have
* to revamp the NFSv3 XDR code) */
status = nfs3_proc_setattr(dentry, &fattr, sattr);
+ if (status == 0)
+ nfs_setattr_update_inode(dentry->d_inode, sattr);
nfs_refresh_inode(dentry->d_inode, &fattr);
dprintk("NFS reply setattr (post-create): %d\n", status);
}
.rpc_argp = &arg,
.rpc_resp = &res,
};
+ int status;
fattr->valid = 0;
} else
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
- return rpc_call_sync(server->client, &msg, 0);
+ status = rpc_call_sync(server->client, &msg, 0);
+ return status;
}
static int nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
status = nfs4_do_setattr(NFS_SERVER(inode), fattr,
NFS_FH(inode), sattr, state);
+ if (status == 0)
+ nfs_setattr_update_inode(inode, sattr);
if (state != NULL)
nfs4_close_state(state, FMODE_WRITE);
put_rpccred(cred);
struct nfs_fattr fattr;
status = nfs4_do_setattr(NFS_SERVER(dir), &fattr,
NFS_FH(state->inode), sattr, state);
- if (status == 0)
+ if (status == 0) {
+ nfs_setattr_update_inode(state->inode, sattr);
goto out;
+ }
} else if (flags != 0)
goto out;
nfs4_close_state(state, flags);
dprintk("NFS call setattr\n");
fattr->valid = 0;
status = rpc_call(NFS_CLIENT(inode), NFSPROC_SETATTR, &arg, fattr, 0);
+ if (status == 0)
+ nfs_setattr_update_inode(inode, sattr);
dprintk("NFS reply setattr: %d\n", status);
return status;
}
if (rdata->res.eof != 0 || result == 0)
break;
} while (count);
- NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME;
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
+ spin_unlock(&inode->i_lock);
if (count)
memclear_highpage_flush(page, rdata->args.pgbase, count);
}
task->tk_status = -EIO;
}
- NFS_FLAGS(data->inode) |= NFS_INO_INVALID_ATIME;
+ spin_lock(&data->inode->i_lock);
+ NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
+ spin_unlock(&data->inode->i_lock);
data->complete(data, status);
}
/* Symlink caching in the page cache is even more simplistic
* and straight-forward than readdir caching.
- *
- * At the beginning of the page we store pointer to struct page in question,
- * simplifying nfs_put_link() (if inode got invalidated we can't find the page
- * to be freed via pagecache lookup).
- * The NUL-terminated string follows immediately thereafter.
*/
-struct nfs_symlink {
- struct page *page;
- char body[0];
-};
-
static int nfs_symlink_filler(struct inode *inode, struct page *page)
{
- const unsigned int pgbase = offsetof(struct nfs_symlink, body);
- const unsigned int pglen = PAGE_SIZE - pgbase;
int error;
lock_kernel();
- error = NFS_PROTO(inode)->readlink(inode, page, pgbase, pglen);
+ error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE);
unlock_kernel();
if (error < 0)
goto error;
return -EIO;
}
-static int nfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = dentry->d_inode;
struct page *page;
- struct nfs_symlink *p;
void *err = ERR_PTR(nfs_revalidate_inode(NFS_SERVER(inode), inode));
if (err)
goto read_failed;
err = ERR_PTR(-EIO);
goto getlink_read_error;
}
- p = kmap(page);
- p->page = page;
- nd_set_link(nd, p->body);
- return 0;
+ nd_set_link(nd, kmap(page));
+ return page;
getlink_read_error:
page_cache_release(page);
read_failed:
nd_set_link(nd, err);
- return 0;
+ return NULL;
}
-static void nfs_put_link(struct dentry *dentry, struct nameidata *nd)
+static void nfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
- char *s = nd_get_link(nd);
- if (!IS_ERR(s)) {
- struct nfs_symlink *p;
- struct page *page;
-
- p = container_of(s, struct nfs_symlink, body[0]);
- page = p->page;
-
+ if (cookie) {
+ struct page *page = cookie;
kunmap(page);
page_cache_release(page);
}
if (xdr_decode_word(buf, base, &entries) ||
entries > NFS_ACL_MAX_ENTRIES)
return -EINVAL;
+ nfsacl_desc.desc.array_maxlen = entries;
err = xdr_decode_array2(buf, base + 4, &nfsacl_desc.desc);
if (err)
return err;
svc_exit_thread(rqstp);
/* Release module */
+ unlock_kernel();
module_put_and_exit(0);
}
fact that the vfs and ntfs inodes are one struct in memory to find
the ntfs inode in memory if present. Also, the ntfs inode has its
own locking so it does not matter if the vfs inode is locked.
+ - Fix bug in mft record writing where we forgot to set the device in
+ the buffers when mapping them after the VM had discarded them.
+	  Thanks to Martin MOKREJŠ for the bug report.
2.1.22 - Many bug and race fixes and error handling improvements.
LCN lcn;
unsigned int vcn_ofs;
+ bh->b_bdev = vol->sb->s_bdev;
/* Obtain the vcn and offset of the current block. */
vcn = (VCN)block << bh_size_bits;
vcn_ofs = vcn & vol->cluster_size_mask;
LCN lcn;
unsigned int vcn_ofs;
+ bh->b_bdev = vol->sb->s_bdev;
/* Obtain the vcn and offset of the current block. */
vcn = ((VCN)mft_no << vol->mft_record_size_bits) +
(block_start - m_start);
LCN lcn;
unsigned int vcn_ofs;
+ bh->b_bdev = vol->sb->s_bdev;
/* Obtain the vcn and offset of the current block. */
vcn = ((VCN)ni->mft_no << vol->mft_record_size_bits) +
(block_start - m_start);
};
#endif /* CONFIG_SECCOMP */
-static int proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = dentry->d_inode;
int error = -EACCES;
error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt);
nd->last_type = LAST_BIND;
out:
- return error;
+ return ERR_PTR(error);
}
static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt,
return vfs_readlink(dentry,buffer,buflen,tmp);
}
-static int proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
{
char tmp[30];
sprintf(tmp, "%d", current->tgid);
- return vfs_follow_link(nd,tmp);
+ return ERR_PTR(vfs_follow_link(nd,tmp));
}
static struct inode_operations proc_self_inode_operations = {
spin_unlock(&proc_inum_lock);
}
-static int proc_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
{
nd_set_link(nd, PDE(dentry->d_inode)->data);
- return 0;
+ return NULL;
}
static struct inode_operations proc_link_inode_operations = {
* iput doesn't deadlock in reiserfs_delete_xattrs. The locking
* code really needs to be reworked, but this will take care of it
* for now. -jeffm */
- if (REISERFS_I(dir)->i_acl_default) {
+ if (REISERFS_I(dir)->i_acl_default && !IS_ERR(REISERFS_I(dir)->i_acl_default)) {
reiserfs_write_unlock_xattrs(dir->i_sb);
iput(inode);
reiserfs_write_lock_xattrs(dir->i_sb);
*/
inode->i_uid = current->fsuid;
inode->i_mode = mode;
+ /* Make inode invalid - just in case we are going to drop it before
+ * the initialization happens */
+ INODE_PKEY(inode)->k_objectid = 0;
if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
return smb_proc_symlink(server_from_dentry(dentry), dentry, oldname);
}
-static int smb_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
{
char *link = __getname();
DEBUG1("followlink of %s/%s\n", DENTRY_PATH(dentry));
}
}
nd_set_link(nd, link);
- return 0;
+ return NULL;
}
-static void smb_put_link(struct dentry *dentry, struct nameidata *nd)
+static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
{
char *s = nd_get_link(nd);
if (!IS_ERR(s))
struct sysfs_dirent * sd;
struct sysfs_dirent * parent_sd = dir->d_fsdata;
+ if (dir->d_inode == NULL)
+ /* no inode means this hasn't been made visible yet */
+ return;
+
down(&dir->d_inode->i_sem);
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
if (!sd->s_element)
}
-static int sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
int error = -ENOMEM;
unsigned long page = get_zeroed_page(GFP_KERNEL);
if (page)
error = sysfs_getlink(dentry, (char *) page);
nd_set_link(nd, error ? ERR_PTR(error) : (char *)page);
- return 0;
+ return NULL;
}
-static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd)
+static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
char *page = nd_get_link(nd);
if (!IS_ERR(page))
#include "sysv.h"
#include <linux/namei.h>
-static int sysv_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *sysv_follow_link(struct dentry *dentry, struct nameidata *nd)
{
nd_set_link(nd, (char *)SYSV_I(dentry->d_inode)->i_data);
- return 0;
+ return NULL;
}
struct inode_operations sysv_fast_symlink_inode_operations = {
#include <linux/namei.h>
#include <linux/ufs_fs.h>
-static int ufs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *ufs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct ufs_inode_info *p = UFS_I(dentry->d_inode);
nd_set_link(nd, (char*)p->i_u1.i_symlink);
- return 0;
+ return NULL;
}
struct inode_operations ufs_fast_symlink_inode_operations = {
* we need to be very careful about how much stack we use.
* uio is kmalloced for this reason...
*/
-STATIC int
+STATIC void *
linvfs_follow_link(
struct dentry *dentry,
struct nameidata *nd)
link = (char *)kmalloc(MAXNAMELEN+1, GFP_KERNEL);
if (!link) {
nd_set_link(nd, ERR_PTR(-ENOMEM));
- return 0;
+ return NULL;
}
uio = (uio_t *)kmalloc(sizeof(uio_t), GFP_KERNEL);
if (!uio) {
kfree(link);
nd_set_link(nd, ERR_PTR(-ENOMEM));
- return 0;
+ return NULL;
}
vp = LINVFS_GET_VP(dentry->d_inode);
kfree(uio);
nd_set_link(nd, link);
- return 0;
+ return NULL;
}
-static void linvfs_put_link(struct dentry *dentry, struct nameidata *nd)
+static void linvfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
{
char *s = nd_get_link(nd);
if (!IS_ERR(s))
if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);
-static inline unsigned long
-__xchg(volatile void *ptr, unsigned long x, int size)
-{
- switch (size) {
- case 1:
- return __xchg_u8(ptr, x);
- case 2:
- return __xchg_u16(ptr, x);
- case 4:
- return __xchg_u32(ptr, x);
- case 8:
- return __xchg_u64(ptr, x);
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
+#define __xchg(ptr, x, size) \
+({ \
+ unsigned long __xchg__res; \
+ volatile void *__xchg__ptr = (ptr); \
+ switch (size) { \
+ case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \
+ case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \
+ case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \
+ case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \
+ default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
+ } \
+ __xchg__res; \
+})
#define xchg(ptr,x) \
({ \
/*
* We use IXP425 General purpose timer for our timer needs, it runs at
- * 66.66... MHz
+ * 66.66... MHz. We do a convoluted calculation of CLOCK_TICK_RATE b/c the
+ * timer register ignores the bottom 2 bits of the LATCH value.
*/
-#define CLOCK_TICK_RATE (66666666)
+#define FREQ 66666666
+#define CLOCK_TICK_RATE (((FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)
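A quick way to see what the new expression evaluates to is to run the same arithmetic in userspace. The sketch below is illustrative only: HZ = 100 and IXP4XX_OST_RELOAD_MASK = 0x3 are assumed values that do not appear in this hunk, and LATCH is computed the way the kernel derives it from CLOCK_TICK_RATE.

#include <stdio.h>

#define HZ                     100   /* assumed */
#define IXP4XX_OST_RELOAD_MASK 0x3   /* assumed: low 2 bits of the reload register are control bits */
#define FREQ                   66666666
#define CLOCK_TICK_RATE        (((FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)
#define LATCH                  ((CLOCK_TICK_RATE + HZ / 2) / HZ)

int main(void)
{
	printf("FREQ / HZ              = %d\n", FREQ / HZ);                         /* 666666 */
	printf("CLOCK_TICK_RATE        = %d\n", CLOCK_TICK_RATE);                   /* 66666500 */
	printf("LATCH                  = %d\n", LATCH);                             /* 666665 */
	printf("value actually counted = %d\n", LATCH & ~IXP4XX_OST_RELOAD_MASK);   /* 666664 */
	return 0;
}

Under these assumed values the bookkeeping rate (CLOCK_TICK_RATE/HZ = 666665) now differs from the period the hardware actually counts (666664, low bits ignored) by a single input clock, rather than by the truncated low bits.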
#ifdef CONFIG_BUG
#ifdef CONFIG_DEBUG_BUGVERBOSE
-extern volatile void __bug(const char *file, int line, void *data);
+extern void __bug(const char *file, int line, void *data) __attribute__((noreturn));
/* give file/line information */
#define BUG() __bug(__FILE__, __LINE__, NULL)
/*
* Special stuff for a reset
*/
- volatile void (*reset)(unsigned long addr);
+ void (*reset)(unsigned long addr) __attribute__((noreturn));
/*
* Idle the processor
*/
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_set_pte(pte_t *ptep, pte_t pte);
-extern volatile void cpu_reset(unsigned long addr);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
#endif
#define __NR_vserver (__NR_SYSCALL_BASE+313)
+#define __NR_ioprio_set (__NR_SYSCALL_BASE+314)
+#define __NR_ioprio_get (__NR_SYSCALL_BASE+315)
+#define __NR_inotify_init (__NR_SYSCALL_BASE+316)
+#define __NR_inotify_add_watch (__NR_SYSCALL_BASE+317)
+#define __NR_inotify_rm_watch (__NR_SYSCALL_BASE+318)
/*
* The following SWIs are ARM private.
#define pcibios_scan_all_fns(a, b) 0
extern unsigned long pci_mem_start;
-#define PCIBIOS_MIN_IO 0x4000
+#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM (pci_mem_start)
+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
+
void pcibios_config_init(void);
struct pci_bus * pcibios_scan_root(int bus);
};
#define desc_empty(desc) \
- (!((desc)->a + (desc)->b))
+ (!((desc)->a | (desc)->b))
#define desc_equal(desc1, desc2) \
(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
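Illustration only (not from the patch): the reason for switching desc_empty() from '+' to '|' is that two nonzero 32-bit descriptor words can sum to zero modulo 2^32, so a live descriptor could be misreported as empty, whereas OR is zero only when both words are zero. The descriptor values below are made up.

#include <stdio.h>

int main(void)
{
	unsigned int a = 0x80000000, b = 0x80000000;   /* hypothetical descriptor words */

	printf("!(a + b) = %d   (old test: wrongly reports \"empty\")\n", !(a + b));
	printf("!(a | b) = %d   (new test: correctly reports \"in use\")\n", !(a | b));
	return 0;
}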
extern u16 ia64_acpiid_to_sapicid[];
+/*
+ * Refer Intel ACPI _PDC support document for bit definitions
+ */
+#define ACPI_PDC_EST_CAPABILITY_SMP 0x8
+
#endif /*__KERNEL__*/
#endif /*_ASM_ACPI_H*/
#define IO_SPACE_BASE(space) ((space) << IO_SPACE_BITS)
#define IO_SPACE_PORT(port) ((port) & (IO_SPACE_SIZE - 1))
-#define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | (p & 0xfff))
+#define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | ((p) & 0xfff))
struct io_space {
unsigned long mmio_base; /* base in MMIO space */
ia64_mfa();
}
-static inline const unsigned long
-__ia64_get_io_port_base (void)
-{
- extern unsigned long ia64_iobase;
-
- return ia64_iobase;
-}
-
static inline void*
__ia64_mk_io_addr (unsigned long port)
{
unsigned int gsi_base);
#ifdef CONFIG_HOTPLUG
extern int iosapic_remove (unsigned int gsi_base);
+#else
+#define iosapic_remove(gsi_base) (-EINVAL)
#endif /* CONFIG_HOTPLUG */
extern int gsi_to_vector (unsigned int gsi);
extern int gsi_to_irq (unsigned int gsi);
#else
#define iosapic_system_init(pcat_compat) do { } while (0)
#define iosapic_init(address,gsi_base) (-EINVAL)
-#ifdef CONFIG_HOTPLUG
#define iosapic_remove(gsi_base) (-ENODEV)
-#endif /* CONFIG_HOTPLUG */
#define iosapic_register_intr(gsi,polarity,trigger) (gsi)
#define iosapic_unregister_intr(irq) do { } while (0)
#define iosapic_override_isa_irq(isa_irq,gsi,polarity,trigger) do { } while (0)
#define __MMU_H
/*
- * Type for a context number. We declare it volatile to ensure proper ordering when it's
- * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
- * init_new_context()).
+ * Type for a context number. We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
*/
typedef volatile unsigned long mm_context_t;
+typedef unsigned long nv_mm_context_t;
+
#endif
delayed_tlb_flush (void)
{
extern void local_flush_tlb_all (void);
+ unsigned long flags;
if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
- local_flush_tlb_all();
- __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+ spin_lock_irqsave(&ia64_ctx.lock, flags);
+ {
+ if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+ local_flush_tlb_all();
+ __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+ }
+ }
+ spin_unlock_irqrestore(&ia64_ctx.lock, flags);
}
}
-static inline mm_context_t
+static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
unsigned long flags;
- mm_context_t context = mm->context;
-
- if (context)
- return context;
-
- spin_lock_irqsave(&ia64_ctx.lock, flags);
- {
- /* re-check, now that we've got the lock: */
- context = mm->context;
- if (context == 0) {
- cpus_clear(mm->cpu_vm_mask);
- if (ia64_ctx.next >= ia64_ctx.limit)
- wrap_mmu_context(mm);
- mm->context = context = ia64_ctx.next++;
+ nv_mm_context_t context = mm->context;
+
+ if (unlikely(!context)) {
+ spin_lock_irqsave(&ia64_ctx.lock, flags);
+ {
+ /* re-check, now that we've got the lock: */
+ context = mm->context;
+ if (context == 0) {
+ cpus_clear(mm->cpu_vm_mask);
+ if (ia64_ctx.next >= ia64_ctx.limit)
+ wrap_mmu_context(mm);
+ mm->context = context = ia64_ctx.next++;
+ }
}
+ spin_unlock_irqrestore(&ia64_ctx.lock, flags);
}
- spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+ /*
+ * Ensure we're not starting to use "context" before any old
+ * uses of it are gone from our TLB.
+ */
+ delayed_tlb_flush();
+
return context;
}
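The unlikely(!context) path above is the classic check / lock / re-check pattern: the unlocked read keeps ia64_ctx.lock off the hot path, and the test is repeated under the lock before a new context number is handed out. A minimal userspace analogue (pthreads; all names below are made up for illustration, build with -pthread) looks like this:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long next_ctx = 1;

struct fake_mm {
	volatile unsigned long context;     /* 0 means "no context assigned yet" */
};

static unsigned long get_context(struct fake_mm *mm)
{
	unsigned long context = mm->context;

	if (context == 0) {                 /* cheap test without the lock */
		pthread_mutex_lock(&ctx_lock);
		context = mm->context;      /* re-check now that we hold the lock */
		if (context == 0)
			mm->context = context = next_ctx++;
		pthread_mutex_unlock(&ctx_lock);
	}
	return context;
}

int main(void)
{
	struct fake_mm mm = { 0 };

	printf("first call:  %lu\n", get_context(&mm));
	printf("second call: %lu\n", get_context(&mm));   /* same number, lock not taken */
	return 0;
}

The kernel version additionally calls delayed_tlb_flush() before returning, so a context number is never used while stale TLB entries for it may still exist.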
}
static inline void
-reload_context (mm_context_t context)
+reload_context (nv_mm_context_t context)
{
unsigned long rid;
unsigned long rid_incr = 0;
static inline void
activate_context (struct mm_struct *mm)
{
- mm_context_t context;
+ nv_mm_context_t context;
do {
context = get_mmu_context(mm);
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
- delayed_tlb_flush();
-
/*
* We may get interrupts here, but that's OK because interrupt handlers cannot
* touch user-space.
#define PAL_CACHE_READ 259 /* read tag & data of cacheline for diagnostic testing */
#define PAL_CACHE_WRITE 260 /* write tag & data of cacheline for diagnostic testing */
#define PAL_VM_TR_READ 261 /* read contents of translation register */
+#define PAL_GET_PSTATE 262 /* get the current P-state */
+#define PAL_SET_PSTATE 263 /* set the P-state */
#ifndef __ASSEMBLY__
return iprv.status;
}
+/* Get the current P-state information */
+static inline s64
+ia64_pal_get_pstate (u64 *pstate_index)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_GET_PSTATE, 0, 0, 0);
+ *pstate_index = iprv.v0;
+ return iprv.status;
+}
+
+/* Set the P-state */
+static inline s64
+ia64_pal_set_pstate (u64 pstate_index)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_SET_PSTATE, pstate_index, 0, 0);
+ return iprv.status;
+}
+
/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
* suspended, but cache and TLB coherency is maintained.
*/
*
* Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
* Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
*
* Based on asm-i386/rwsem.h and other architecture implementation.
*
*
* The lock count is initialized to 0 (no active and no waiting lockers).
*
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case
- * of an uncontended lock. Readers increment by 1 and see a positive value
- * when uncontended, negative if there are writers (and maybe) readers
+ * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
+ * the case of an uncontended lock. Readers increment by 1 and see a positive
+ * value when uncontended, negative if there are writers (and maybe) readers
* waiting (in which case it goes to sleep).
*/
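The arithmetic in that comment can be checked directly with the 64-bit constants introduced below. Illustration only; the __IA64_UL_CONST() wrappers are replaced by plain long long literals.

#include <stdio.h>
#include <stdint.h>

#define RWSEM_UNLOCKED_VALUE    0x0000000000000000LL
#define RWSEM_ACTIVE_BIAS       0x0000000000000001LL
#define RWSEM_WAITING_BIAS      (-0x0000000100000000LL)
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

static void show(const char *what, int64_t count)
{
	printf("%-22s count = 0x%016llx (%s)\n", what,
	       (unsigned long long)count, count < 0 ? "negative" : "positive");
}

int main(void)
{
	int64_t count = RWSEM_UNLOCKED_VALUE;

	show("uncontended writer", count + RWSEM_ACTIVE_WRITE_BIAS);    /* 0xffffffff00000001 */
	show("uncontended reader", count + RWSEM_ACTIVE_READ_BIAS);     /* 0x0000000000000001 */
	show("reader vs. writer",
	     count + RWSEM_ACTIVE_WRITE_BIAS + RWSEM_ACTIVE_READ_BIAS); /* still negative: sleep */
	return 0;
}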
* the semaphore definition
*/
struct rw_semaphore {
- signed int count;
+ signed long count;
spinlock_t wait_lock;
struct list_head wait_list;
#if RWSEM_DEBUG
#endif
};
-#define RWSEM_UNLOCKED_VALUE 0x00000000
-#define RWSEM_ACTIVE_BIAS 0x00000001
-#define RWSEM_ACTIVE_MASK 0x0000ffff
-#define RWSEM_WAITING_BIAS (-0x00010000)
+#define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000)
+#define RWSEM_ACTIVE_BIAS __IA64_UL_CONST(0x0000000000000001)
+#define RWSEM_ACTIVE_MASK __IA64_UL_CONST(0x00000000ffffffff)
+#define RWSEM_WAITING_BIAS -__IA64_UL_CONST(0x0000000100000000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
static inline void
__down_read (struct rw_semaphore *sem)
{
- int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
+ long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);
if (result < 0)
rwsem_down_read_failed(sem);
static inline void
__down_write (struct rw_semaphore *sem)
{
- int old, new;
+ long old, new;
do {
old = sem->count;
static inline void
__up_read (struct rw_semaphore *sem)
{
- int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
+ long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);
if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
static inline void
__up_write (struct rw_semaphore *sem)
{
- int old, new;
+ long old, new;
do {
old = sem->count;
static inline int
__down_read_trylock (struct rw_semaphore *sem)
{
- int tmp;
+ long tmp;
while ((tmp = sem->count) >= 0) {
if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
return 1;
static inline int
__down_write_trylock (struct rw_semaphore *sem)
{
- int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
+ long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
return tmp == RWSEM_UNLOCKED_VALUE;
}
static inline void
__downgrade_write (struct rw_semaphore *sem)
{
- int old, new;
+ long old, new;
do {
old = sem->count;
* Implement atomic add functionality. These used to be "inline" functions, but GCC v3.1
* doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
*/
-#define rwsem_atomic_add(delta, sem) atomic_add(delta, (atomic_t *)(&(sem)->count))
-#define rwsem_atomic_update(delta, sem) atomic_add_return(delta, (atomic_t *)(&(sem)->count))
+#define rwsem_atomic_add(delta, sem) atomic64_add(delta, (atomic64_t *)(&(sem)->count))
+#define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
#endif /* _ASM_IA64_RWSEM_H */
# endif /* CONFIG_MCKINLEY */
#endif
}
+
#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
+
+/* Unlock by doing an ordered store and releasing the cacheline with nta */
+static inline void _raw_spin_unlock(spinlock_t *x) {
+ barrier();
+ asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
+}
+
#else /* !ASM_SUPPORTED */
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
# define _raw_spin_lock(x) \
} while (ia64_spinlock_val); \
} \
} while (0)
+#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
#endif /* !ASM_SUPPORTED */
#define spin_is_locked(x) ((x)->lock != 0)
-#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
typedef struct {
- volatile unsigned int read_counter : 31;
- volatile unsigned int write_lock : 1;
+ volatile unsigned int read_counter : 24;
+ volatile unsigned int write_lock : 8;
#ifdef CONFIG_PREEMPT
unsigned int break_lock;
#endif
(result == 0); \
})
+static inline void _raw_write_unlock(rwlock_t *x)
+{
+ u8 *y = (u8 *)x;
+ barrier();
+ asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
+}
+
#else /* !ASM_SUPPORTED */
#define _raw_write_lock(l) \
(ia64_val == 0); \
})
+static inline void _raw_write_unlock(rwlock_t *x)
+{
+ barrier();
+ x->write_lock = 0;
+}
+
#endif /* !ASM_SUPPORTED */
#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
-#define _raw_write_unlock(x) \
-({ \
- smp_mb__before_clear_bit(); /* need barrier before releasing lock... */ \
- clear_bit(31, (x)); \
-})
-
#endif /* _ASM_IA64_SPINLOCK_H */
* Some lowlevel functions might want to know about
* the real CPU ID <-> CPU # mapping.
*/
-extern volatile int physid_2_cpu[NR_CPUS];
extern volatile int cpu_2_physid[NR_CPUS];
-#define physid_to_cpu(physid) physid_2_cpu[physid]
#define cpu_to_physid(cpu_id) cpu_2_physid[cpu_id]
#define raw_smp_processor_id() (current_thread_info()->cpu)
#define __pa(vaddr) ((unsigned long)(vaddr)+m68k_memoffset)
#define __va(paddr) ((void *)((unsigned long)(paddr)-m68k_memoffset))
#else
-#define __pa(vaddr) virt_to_phys((void *)vaddr)
-#define __va(paddr) phys_to_virt((unsigned long)paddr)
+#define __pa(vaddr) virt_to_phys((void *)(vaddr))
+#define __va(paddr) phys_to_virt((unsigned long)(paddr))
#endif
#else /* !CONFIG_SUN3 */
/* This #define is a horrible hack to suppress lots of warnings. --m */
-#define __pa(x) ___pa((unsigned long)x)
+#define __pa(x) ___pa((unsigned long)(x))
static inline unsigned long ___pa(unsigned long x)
{
if(x == 0)
#define MQ0_CONFIG_SIZE_2G 0x0000c000
/* Internal SRAM Controller 440GX/440SP */
-#ifdef CONFIG_440SP
-#define DCRN_SRAM0_BASE 0x100
-#else /* 440GX */
#define DCRN_SRAM0_BASE 0x000
-#endif
#define DCRN_SRAM0_SB0CR (DCRN_SRAM0_BASE + 0x020)
#define DCRN_SRAM0_SB1CR (DCRN_SRAM0_BASE + 0x021)
#define GET_DMA_POLARITY(chan) (DMAReq_ActiveLow(chan) | DMAAck_ActiveLow(chan) | EOT_ActiveLow(chan))
-#elif defined(CONFIG_STBXXX_DMA) /* stb03xxx */
+#elif defined(CONFIG_STB03xxx) /* stb03xxx */
#define DMA_PPC4xx_SIZE 4096
/* Accessor functions for the timebase (RTC on 601) registers. */
/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
#ifdef CONFIG_6xx
-extern __inline__ int const __USE_RTC(void) {
+extern __inline__ int __attribute_pure__ __USE_RTC(void) {
return (mfspr(SPRN_PVR)>>16) == 1;
}
#else
".section __bug_table,\"a\"\n\t" \
" .llong 1b,%1,%2,%3\n" \
".previous" \
- : : "r" (x), "i" (__LINE__), "i" (__FILE__), \
- "i" (__FUNCTION__)); \
+ : : "r" ((long long)(x)), "i" (__LINE__), \
+ "i" (__FILE__), "i" (__FUNCTION__)); \
} while (0)
#define WARN_ON(x) do { \
".section __bug_table,\"a\"\n\t" \
" .llong 1b,%1,%2,%3\n" \
".previous" \
- : : "r" (x), "i" (__LINE__ + BUG_WARNING_TRAP), \
+ : : "r" ((long long)(x)), \
+ "i" (__LINE__ + BUG_WARNING_TRAP), \
"i" (__FILE__), "i" (__FUNCTION__)); \
} while (0)
#ifndef _LPARMAP_H
#define _LPARMAP_H
+#ifndef __ASSEMBLY__
+
#include <asm/types.h>
/*
} xRanges[HvRangesToMap];
};
-extern struct LparMap xLparMap;
+extern const struct LparMap xLparMap;
+
+#endif /* __ASSEMBLY__ */
+
+/* the fixed address where the LparMap exists */
+#define LPARMAP_PHYS 0x7000
#endif /* _LPARMAP_H */
})
#endif
-#ifndef __CHECKER__
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) __x = (x); \
int __pu_err; \
+ __chk_user_ptr(ptr); \
switch (sizeof (*(ptr))) { \
case 1: \
case 2: \
} \
__pu_err; \
})
-#else
-#define __put_user(x, ptr) \
-({ \
- void __user *p; \
- p = (ptr); \
- 0; \
-})
-#endif
#define put_user(x, ptr) \
({ \
})
#endif
-#ifndef __CHECKER__
#define __get_user(x, ptr) \
({ \
__typeof__(*(ptr)) __x; \
int __gu_err; \
+ __chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
case 2: \
(x) = __x; \
__gu_err; \
})
-#else
-#define __get_user(x, ptr) \
-({ \
- void __user *p; \
- p = (ptr); \
- 0; \
-})
-#endif
-
#define get_user(x, ptr) \
({ \
#define __NR_add_key 285
#define __NR_request_key 286
#define __NR_keyctl 287
+#define __NR_ioprio_set 288
+#define __NR_ioprio_get 289
+#define __NR_inotify_init 290
+#define __NR_inotify_add_watch 291
+#define __NR_inotify_rm_watch 292
-#define NR_syscalls 288
+
+#define NR_syscalls 293
/* user-visible error numbers are in the range -1 - -124: see <asm-sh/errno.h> */
register long __sc7 __asm__ ("r7") = (long) arg4; \
register long __sc0 __asm__ ("r0") = (long) arg5; \
register long __sc1 __asm__ ("r1") = (long) arg6; \
-__asm__ __volatile__ ("trapa #0x15" \
+__asm__ __volatile__ ("trapa #0x16" \
: "=z" (__sc0) \
: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \
"r" (__sc3), "r" (__sc1) \
#define __NR_add_key 313
#define __NR_request_key 314
#define __NR_keyctl 315
+#define __NR_ioprio_set 316
+#define __NR_ioprio_get 317
+#define __NR_inotify_init 318
+#define __NR_inotify_add_watch 319
+#define __NR_inotify_rm_watch 320
-#define NR_syscalls 316
+#define NR_syscalls 321
/* user-visible error numbers are in the range -1 - -125: see <asm-sh64/errno.h> */
struct restart_block restart_block;
+ struct pt_regs *kern_una_regs;
+ unsigned int kern_una_insn;
+
unsigned long fpregs[0] __attribute__ ((aligned(64)));
};
#define TI_PCR 0x00000490
#define TI_CEE_STUFF 0x00000498
#define TI_RESTART_BLOCK 0x000004a0
+#define TI_KUNA_REGS 0x000004c8
+#define TI_KUNA_INSN 0x000004d0
#define TI_FPREGS 0x00000500
/* We embed this in the uppermost byte of thread_info->flags */
* casting is the right thing, but 32-bit UML can't have 64-bit virtual
* addresses
*/
-#define __pa(virt) to_phys((void *) (unsigned long) virt)
-#define __va(phys) to_virt((unsigned long) phys)
+#define __pa(virt) to_phys((void *) (unsigned long) (virt))
+#define __va(phys) to_virt((unsigned long) (phys))
#define page_to_pfn(page) ((page) - mem_map)
#define pfn_to_page(pfn) (mem_map + (pfn))
extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
extern void e820_setup_gap(void);
+extern unsigned long e820_hole_size(unsigned long start_pfn,
+ unsigned long end_pfn);
extern void __init parse_memopt(char *p, char **end);
extern int no_iommu, force_iommu;
extern unsigned long pci_mem_start;
-#define PCIBIOS_MIN_IO 0x4000
+#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM (pci_mem_start)
+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
+
void pcibios_config_init(void);
struct pci_bus * pcibios_scan_root(int bus);
extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
#define ID_MASK 0x00200000
#define desc_empty(desc) \
- (!((desc)->a + (desc)->b))
+ (!((desc)->a | (desc)->b))
#define desc_equal(desc1, desc2) \
(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
int (*rename) (struct inode *, struct dentry *,
struct inode *, struct dentry *);
int (*readlink) (struct dentry *, char __user *,int);
- int (*follow_link) (struct dentry *, struct nameidata *);
- void (*put_link) (struct dentry *, struct nameidata *);
+ void * (*follow_link) (struct dentry *, struct nameidata *);
+ void (*put_link) (struct dentry *, struct nameidata *, void *);
void (*truncate) (struct inode *);
int (*permission) (struct inode *, int, struct nameidata *);
int (*setattr) (struct dentry *, struct iattr *);
extern int vfs_readlink(struct dentry *, char __user *, int, const char *);
extern int vfs_follow_link(struct nameidata *, const char *);
extern int page_readlink(struct dentry *, char __user *, int);
-extern int page_follow_link_light(struct dentry *, struct nameidata *);
-extern void page_put_link(struct dentry *, struct nameidata *);
+extern void *page_follow_link_light(struct dentry *, struct nameidata *);
+extern void page_put_link(struct dentry *, struct nameidata *, void *);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern struct inode_operations page_symlink_inode_operations;
extern int generic_readlink(struct dentry *, char __user *, int);
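The new ->follow_link()/->put_link() prototypes above establish a cookie contract: follow_link returns an opaque pointer (a mapped page, an ERR_PTR, or NULL), and the same value is handed back to put_link so it can release whatever follow_link pinned. The sketch below is only a userspace analogy of that contract, not the VFS API; strdup()/free() stand in for the kmap()/kunmap() and page references used by the filesystems converted in this patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* "follow": produce the link body, return a cookie for later cleanup */
static void *follow_link(const char *target, const char **body)
{
	char *copy = strdup(target);    /* stands in for kmap()ing a page */

	*body = copy ? copy : "";
	return copy;                    /* may be NULL: nothing to release later */
}

/* "put": release whatever follow_link pinned, using only the cookie */
static void put_link(void *cookie)
{
	free(cookie);                   /* stands in for kunmap()/page_cache_release() */
}

int main(void)
{
	const char *body;
	void *cookie = follow_link("/some/target", &body);

	printf("link body: %s\n", body);
	put_link(cookie);
	return 0;
}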
*/
static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
const char *old_name, const char *new_name,
- int isdir, struct inode *target)
+ int isdir, struct inode *target, struct inode *source)
{
u32 cookie = inotify_get_cookie();
inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL);
inotify_inode_is_dead(target);
}
+
+ if (source) {
+ inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL);
+ }
}
/*
#define IN_CREATE 0x00000100 /* Subfile was created */
#define IN_DELETE 0x00000200 /* Subfile was deleted */
#define IN_DELETE_SELF 0x00000400 /* Self was deleted */
+#define IN_MOVE_SELF 0x00000800 /* Self was moved */
/* the following are legal events. they are sent as needed to any watch */
#define IN_UNMOUNT 0x00002000 /* Backing fs was unmounted */
*/
#define IN_ALL_EVENTS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \
IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \
- IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF)
+ IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF | \
+ IN_MOVE_SELF)
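For context, a userspace watcher can now be notified when the watched object itself is renamed. The sketch below is illustrative only; it assumes the <sys/inotify.h> wrappers (inotify_init(), inotify_add_watch()) that libc later provided, whereas on a system of this vintage the raw __NR_inotify_* syscalls added elsewhere in this patch would be reached through syscall() instead.

#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(int argc, char **argv)
{
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	ssize_t len;
	int fd, wd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path>\n", argv[0]);
		return 1;
	}

	fd = inotify_init();
	if (fd < 0) {
		perror("inotify_init");
		return 1;
	}
	wd = inotify_add_watch(fd, argv[1], IN_MOVE_SELF | IN_DELETE_SELF);
	if (wd < 0) {
		perror("inotify_add_watch");
		return 1;
	}

	len = read(fd, buf, sizeof(buf));       /* blocks until an event arrives */
	if (len >= (ssize_t)sizeof(struct inotify_event)) {
		struct inotify_event *ev = (struct inotify_event *)buf;

		if (ev->mask & IN_MOVE_SELF)
			printf("%s was moved/renamed\n", argv[1]);
		else if (ev->mask & IN_DELETE_SELF)
			printf("%s was deleted\n", argv[1]);
	}
	close(fd);
	return 0;
}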
#ifdef __KERNEL__
#include <linux/netdevice.h>
#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
#include <linux/list.h>
struct netpoll;
struct netpoll_info {
spinlock_t poll_lock;
int poll_owner;
+ int tries;
int rx_flags;
spinlock_t rx_lock;
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
return ret;
}
-static inline void netpoll_poll_lock(struct net_device *dev)
+static inline void *netpoll_poll_lock(struct net_device *dev)
{
+ rcu_read_lock(); /* deal with race on ->npinfo */
if (dev->npinfo) {
spin_lock(&dev->npinfo->poll_lock);
dev->npinfo->poll_owner = smp_processor_id();
+ return dev->npinfo;
}
+ return NULL;
}
-static inline void netpoll_poll_unlock(struct net_device *dev)
+static inline void netpoll_poll_unlock(void *have)
{
- if (dev->npinfo) {
- dev->npinfo->poll_owner = -1;
- spin_unlock(&dev->npinfo->poll_lock);
+ struct netpoll_info *npi = have;
+
+ if (npi) {
+ npi->poll_owner = -1;
+ spin_unlock(&npi->poll_lock);
}
+ rcu_read_unlock();
}
#else
#define netpoll_rx(a) 0
-#define netpoll_poll_lock(a)
+#define netpoll_poll_lock(a) 0
#define netpoll_poll_unlock(a)
#endif
/*
* Various flags
*/
- unsigned int flags;
+ unsigned long flags; /* atomic bit ops */
+ unsigned long cache_validity; /* bit mask */
/*
* read_cache_jiffies is when we started read-caching this inode,
/* Open contexts for shared mmap writes */
struct list_head open_files;
- wait_queue_head_t nfs_i_wait;
-
#ifdef CONFIG_NFS_V4
struct nfs4_cached_acl *nfs4_acl;
/* NFSv4 state */
};
/*
- * Legal inode flag values
+ * Cache validity bit flags
*/
-#define NFS_INO_STALE 0x0001 /* possible stale inode */
-#define NFS_INO_ADVISE_RDPLUS 0x0002 /* advise readdirplus */
-#define NFS_INO_REVALIDATING 0x0004 /* revalidating attrs */
-#define NFS_INO_INVALID_ATTR 0x0008 /* cached attrs are invalid */
-#define NFS_INO_INVALID_DATA 0x0010 /* cached data is invalid */
-#define NFS_INO_INVALID_ATIME 0x0020 /* cached atime is invalid */
-#define NFS_INO_INVALID_ACCESS 0x0040 /* cached access cred invalid */
-#define NFS_INO_INVALID_ACL 0x0080 /* cached acls are invalid */
-#define NFS_INO_REVAL_PAGECACHE 0x1000 /* must revalidate pagecache */
+#define NFS_INO_INVALID_ATTR 0x0001 /* cached attrs are invalid */
+#define NFS_INO_INVALID_DATA 0x0002 /* cached data is invalid */
+#define NFS_INO_INVALID_ATIME 0x0004 /* cached atime is invalid */
+#define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */
+#define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */
+#define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */
+
+/*
+ * Bit offsets in flags field
+ */
+#define NFS_INO_REVALIDATING (0) /* revalidating attrs */
+#define NFS_INO_ADVISE_RDPLUS (1) /* advise readdirplus */
+#define NFS_INO_STALE (2) /* possible stale inode */
static inline struct nfs_inode *NFS_I(struct inode *inode)
{
#define NFS_ATTRTIMEO_UPDATE(inode) (NFS_I(inode)->attrtimeo_timestamp)
#define NFS_FLAGS(inode) (NFS_I(inode)->flags)
-#define NFS_REVALIDATING(inode) (NFS_FLAGS(inode) & NFS_INO_REVALIDATING)
-#define NFS_STALE(inode) (NFS_FLAGS(inode) & NFS_INO_STALE)
+#define NFS_STALE(inode) (test_bit(NFS_INO_STALE, &NFS_FLAGS(inode)))
#define NFS_FILEID(inode) (NFS_I(inode)->fileid)
static inline void NFS_CACHEINV(struct inode *inode)
{
- if (!nfs_caches_unstable(inode))
- NFS_FLAGS(inode) |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS;
+ if (!nfs_caches_unstable(inode)) {
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS;
+ spin_unlock(&inode->i_lock);
+ }
}
static inline int nfs_server_capable(struct inode *inode, int cap)
static inline int NFS_USE_READDIRPLUS(struct inode *inode)
{
- return NFS_FLAGS(inode) & NFS_INO_ADVISE_RDPLUS;
+ return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
}
/**
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
extern void nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
extern int nfs_setattr(struct dentry *, struct iattr *);
+extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
extern void nfs_begin_attr_update(struct inode *);
extern void nfs_end_attr_update(struct inode *);
extern void nfs_begin_data_update(struct inode *);
/* keep track of device state */
unsigned int is_enabled:1; /* pci_enable_device has been called */
unsigned int is_busmaster:1; /* device is busmaster */
-
+ unsigned int no_msi:1; /* device may not use msi */
+
u32 saved_config_space[16]; /* config space saved at suspend time */
struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
int rom_attr_enabled; /* has display of the rom attribute been enabled? */
#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e
#define PCI_DEVICE_ID_APPLE_UNI_N_FW2 0x0030
#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032
-#define PCI_DEVIEC_ID_APPLE_UNI_N_ATA 0x0033
+#define PCI_DEVICE_ID_APPLE_UNI_N_ATA 0x0033
#define PCI_DEVICE_ID_APPLE_UNI_N_AGP2 0x0034
#define PCI_DEVICE_ID_APPLE_IPID_ATA100 0x003b
#define PCI_DEVICE_ID_APPLE_KEYLARGO_I 0x003e
#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211
#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212
#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213
+#define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214
#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217
#define PCI_DEVICE_ID_SERVERWORKS_OSB4USB 0x0220
#define PCI_DEVICE_ID_SERVERWORKS_CSB5USB PCI_DEVICE_ID_SERVERWORKS_OSB4USB
#define PCI_VENDOR_ID_SIBYTE 0x166d
#define PCI_DEVICE_ID_BCM1250_HT 0x0002
+#define PCI_VENDOR_ID_NETCELL 0x169c
+#define PCI_DEVICE_ID_REVOLUTION 0x0044
+
#define PCI_VENDOR_ID_LINKSYS 0x1737
#define PCI_DEVICE_ID_LINKSYS_EG1032 0x1032
#define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
#define PCI_DEVICE_ID_INTEL_21145 0x0039
+#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
+#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
+#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
+#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
+#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
#define PCI_DEVICE_ID_INTEL_82375 0x0482
#define PCI_DEVICE_ID_INTEL_82424 0x0483
#define PCI_DEVICE_ID_INTEL_82378 0x0484
nohdr:1;
/* 3 bits spare */
__u8 pkt_type;
- __u16 protocol;
+ __be16 protocol;
void (*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
struct xdr_array2_desc {
unsigned int elem_size;
unsigned int array_len;
+ unsigned int array_maxlen;
xdr_xcode_elem_t xcode;
};
#define AX25_DEF_DS_TIMEOUT (3 * 60 * HZ) /* DAMA timeout 3 minutes */
typedef struct ax25_uid_assoc {
- struct ax25_uid_assoc *next;
+ struct hlist_node uid_node;
+ atomic_t refcount;
uid_t uid;
ax25_address call;
} ax25_uid_assoc;
+#define ax25_uid_for_each(__ax25, node, list) \
+ hlist_for_each_entry(__ax25, node, list, uid_node)
+
+#define ax25_uid_hold(ax25) \
+ atomic_inc(&((ax25)->refcount))
+
+static inline void ax25_uid_put(ax25_uid_assoc *assoc)
+{
+ if (atomic_dec_and_test(&assoc->refcount)) {
+ kfree(assoc);
+ }
+}
+
typedef struct {
ax25_address calls[AX25_MAX_DIGIS];
unsigned char repeated[AX25_MAX_DIGIS];
/* ax25_uid.c */
extern int ax25_uid_policy;
-extern ax25_address *ax25_findbyuid(uid_t);
+extern ax25_uid_assoc *ax25_findbyuid(uid_t);
extern int ax25_uid_ioctl(int, struct sockaddr_ax25 *);
extern struct file_operations ax25_uid_fops;
extern void ax25_uid_free(void);
SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};
+static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
+{
+ nsk->sk_flags = osk->sk_flags;
+}
+
static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
__set_bit(flag, &sk->sk_flags);
/* isadma.c */
+#ifdef CONFIG_ISA_DMA_API
#define DMA_MODE_NO_ENABLE 0x0100
void snd_dma_program(unsigned long dma, unsigned long addr, unsigned int size, unsigned short mode);
void snd_dma_disable(unsigned long dma);
unsigned int snd_dma_pointer(unsigned long dma, unsigned int size);
+#endif
/* misc.c */
* Call with cpuset_sem held. May nest a call to the
* lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
*/
+
+/*
+ * Hack to avoid 2.6.13 partial node dynamic sched domain bug.
+ * Disable letting 'cpu_exclusive' cpusets define dynamic sched
+ * domains, until the sched domain can handle partial nodes.
+ * Remove this #if hackery when sched domains fixed.
+ */
+#if 0
static void update_cpu_domains(struct cpuset *cur)
{
struct cpuset *c, *par = cur->parent;
partition_sched_domains(&pspan, &cspan);
unlock_cpu_hotplug();
}
+#else
+static void update_cpu_domains(struct cpuset *cur)
+{
+}
+#endif
static int update_cpumask(struct cpuset *cs, char *buf)
{
*/
int can_nice(const task_t *p, const int nice)
{
- /* convert nice value [19,-20] to rlimit style value [0,39] */
- int nice_rlim = 19 - nice;
+ /* convert nice value [19,-20] to rlimit style value [1,40] */
+ int nice_rlim = 20 - nice;
return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
capable(CAP_SYS_NICE));
}
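A quick sanity check of the corrected mapping, for illustration only: nice 19 (the weakest priority) maps to rlimit value 1 and nice -20 maps to 40, matching the RLIMIT_NICE convention that the reachable nice value is 20 - rlim_cur.

#include <stdio.h>

int main(void)
{
	int nice;

	for (nice = 19; nice >= -20; nice--)
		printf("nice %3d -> RLIMIT_NICE value %2d\n", nice, 20 - nice);
	return 0;
}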
{
struct task_struct *t;
- if (p->flags & SIGNAL_GROUP_EXIT)
+ if (p->signal->flags & SIGNAL_GROUP_EXIT)
/*
* The process is in the middle of dying already.
*/
parent = me->group_leader->real_parent;
for (;;) {
pid = parent->tgid;
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
{
struct task_struct *old = parent;
}
/**
- * idr_get_new_above - allocate new idr entry above a start id
+ * idr_get_new_above - allocate new idr entry above or equal to a start id
* @idp: idr handle
 * @ptr: pointer you want associated with the id
* @start_id: id to start search at
int qualifier; /* 'h', 'l', or 'L' for integer fields */
/* 'z' support added 23/7/1999 S.H. */
/* 'z' changed to 'Z' --davidm 1/25/99 */
+ /* 't' added for ptrdiff_t */
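For reference, 't' is the standard C99 length modifier for ptrdiff_t; after this change the kernel's vsnprintf() accepts it in the same way userspace printf does. A plain C99 illustration:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	int buf[8];
	ptrdiff_t d = &buf[7] - &buf[2];

	printf("element distance: %td\n", d);   /* prints 5 */
	return 0;
}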
/* Reject out-of-range values early */
if (unlikely((int) size < 0)) {
/* get the conversion qualifier */
qualifier = -1;
if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
- *fmt =='Z' || *fmt == 'z') {
+ *fmt =='Z' || *fmt == 'z' || *fmt == 't') {
qualifier = *fmt;
++fmt;
if (qualifier == 'l' && *fmt == 'l') {
num = (signed long) num;
} else if (qualifier == 'Z' || qualifier == 'z') {
num = va_arg(args, size_t);
+ } else if (qualifier == 't') {
+ num = va_arg(args, ptrdiff_t);
} else if (qualifier == 'h') {
num = (unsigned short) va_arg(args, int);
if (flags & SIGN)
{
*t = NULL;
*m = 0;
- return Z_DATA_ERROR;
+ return Z_OK;
}
return 0;
}
-static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
+static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
- return 0;
+ return NULL;
}
-static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct page *page = NULL;
int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
- return 0;
+ return page;
}
-static void shmem_put_link(struct dentry *dentry, struct nameidata *nd)
+static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
if (!IS_ERR(nd_get_link(nd))) {
- struct page *page;
-
- page = find_get_page(dentry->d_inode->i_mapping, 0);
- if (!page)
- BUG();
+ struct page *page = cookie;
kunmap(page);
mark_page_accessed(page);
page_cache_release(page);
- page_cache_release(page);
}
}
unsigned int hash;
struct rif_cache *entry;
unsigned char *olddata;
+ unsigned long flags;
static const unsigned char mcast_func_addr[]
= {0xC0,0x00,0x00,0x04,0x00,0x00};
- spin_lock_bh(&rif_lock);
+ spin_lock_irqsave(&rif_lock, flags);
/*
* Broadcasts are single route as stated in RFC 1042
else
slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
olddata = skb->data;
- spin_unlock_bh(&rif_lock);
+ spin_unlock_irqrestore(&rif_lock, flags);
skb_pull(skb, slack);
memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
{
unsigned int hash, rii_p = 0;
+ unsigned long flags;
struct rif_cache *entry;
- spin_lock_bh(&rif_lock);
+ spin_lock_irqsave(&rif_lock, flags);
/*
* Firstly see if the entry exists
if(!entry)
{
printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
- spin_unlock_bh(&rif_lock);
+ spin_unlock_irqrestore(&rif_lock, flags);
return;
}
}
entry->last_used=jiffies;
}
- spin_unlock_bh(&rif_lock);
+ spin_unlock_irqrestore(&rif_lock, flags);
}
/*
static void rif_check_expire(unsigned long dummy)
{
int i;
- unsigned long next_interval = jiffies + sysctl_tr_rif_timeout/2;
+ unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;
- spin_lock_bh(&rif_lock);
+ spin_lock_irqsave(&rif_lock, flags);
for(i =0; i < RIF_TABLE_SIZE; i++) {
struct rif_cache *entry, **pentry;
}
}
- spin_unlock_bh(&rif_lock);
+ spin_unlock_irqrestore(&rif_lock, flags);
mod_timer(&rif_timer, next_interval);
static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
{
- spin_lock_bh(&rif_lock);
+ spin_lock_irq(&rif_lock);
return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
}
static void rif_seq_stop(struct seq_file *seq, void *v)
{
- spin_unlock_bh(&rif_lock);
+ spin_unlock_irq(&rif_lock);
}
static int rif_seq_show(struct seq_file *seq, void *v)
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sk->sk_sleep = osk->sk_sleep;
-
- if (sock_flag(osk, SOCK_DBG))
- sock_set_flag(sk, SOCK_DBG);
-
- if (sock_flag(osk, SOCK_ZAPPED))
- sock_set_flag(sk, SOCK_ZAPPED);
+ sock_copy_flags(sk, osk);
oax25 = ax25_sk(osk);
struct sock *sk = sock->sk;
struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
ax25_dev *ax25_dev = NULL;
- ax25_address *call;
+ ax25_uid_assoc *user;
+ ax25_address call;
ax25_cb *ax25;
int err = 0;
if (addr->fsa_ax25.sax25_family != AF_AX25)
return -EINVAL;
- call = ax25_findbyuid(current->euid);
- if (call == NULL && ax25_uid_policy && !capable(CAP_NET_ADMIN)) {
- return -EACCES;
+ user = ax25_findbyuid(current->euid);
+ if (user) {
+ call = user->call;
+ ax25_uid_put(user);
+ } else {
+ if (ax25_uid_policy && !capable(CAP_NET_ADMIN))
+ return -EACCES;
+
+ call = addr->fsa_ax25.sax25_call;
}
lock_sock(sk);
goto out;
}
- if (call == NULL)
- ax25->source_addr = addr->fsa_ax25.sax25_call;
- else
- ax25->source_addr = *call;
+ ax25->source_addr = call;
/*
* User already set interface with SO_BINDTODEVICE
*/
int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
{
+ ax25_uid_assoc *user;
ax25_route *ax25_rt;
- ax25_address *call;
int err;
if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
goto put;
}
- if ((call = ax25_findbyuid(current->euid)) == NULL) {
+ user = ax25_findbyuid(current->euid);
+ if (user) {
+ ax25->source_addr = user->call;
+ ax25_uid_put(user);
+ } else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
err = -EPERM;
goto put;
}
- call = (ax25_address *)ax25->ax25_dev->dev->dev_addr;
+ ax25->source_addr = *(ax25_address *)ax25->ax25_dev->dev->dev_addr;
}
- ax25->source_addr = *call;
-
if (ax25_rt->digipeat != NULL) {
if ((ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
err = -ENOMEM;
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
+#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
* Callsign/UID mapper. This is in kernel space for security on multi-amateur machines.
*/
-static ax25_uid_assoc *ax25_uid_list;
+HLIST_HEAD(ax25_uid_list);
static DEFINE_RWLOCK(ax25_uid_lock);
int ax25_uid_policy = 0;
-ax25_address *ax25_findbyuid(uid_t uid)
+ax25_uid_assoc *ax25_findbyuid(uid_t uid)
{
- ax25_uid_assoc *ax25_uid;
- ax25_address *res = NULL;
+ ax25_uid_assoc *ax25_uid, *res = NULL;
+ struct hlist_node *node;
read_lock(&ax25_uid_lock);
- for (ax25_uid = ax25_uid_list; ax25_uid != NULL; ax25_uid = ax25_uid->next) {
+ ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
if (ax25_uid->uid == uid) {
- res = &ax25_uid->call;
+ ax25_uid_hold(ax25_uid);
+ res = ax25_uid;
break;
}
}
read_unlock(&ax25_uid_lock);
- return NULL;
+ return res;
}
int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
{
- ax25_uid_assoc *s, *ax25_uid;
+ ax25_uid_assoc *ax25_uid;
+ struct hlist_node *node;
+ ax25_uid_assoc *user;
unsigned long res;
switch (cmd) {
case SIOCAX25GETUID:
res = -ENOENT;
read_lock(&ax25_uid_lock);
- for (ax25_uid = ax25_uid_list; ax25_uid != NULL; ax25_uid = ax25_uid->next) {
+ ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
res = ax25_uid->uid;
break;
case SIOCAX25ADDUID:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (ax25_findbyuid(sax->sax25_uid))
+ user = ax25_findbyuid(sax->sax25_uid);
+ if (user) {
+ ax25_uid_put(user);
return -EEXIST;
+ }
if (sax->sax25_uid == 0)
return -EINVAL;
if ((ax25_uid = kmalloc(sizeof(*ax25_uid), GFP_KERNEL)) == NULL)
return -ENOMEM;
+ atomic_set(&ax25_uid->refcount, 1);
ax25_uid->uid = sax->sax25_uid;
ax25_uid->call = sax->sax25_call;
write_lock(&ax25_uid_lock);
- ax25_uid->next = ax25_uid_list;
- ax25_uid_list = ax25_uid;
+ hlist_add_head(&ax25_uid->uid_node, &ax25_uid_list);
write_unlock(&ax25_uid_lock);
return 0;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ ax25_uid = NULL;
write_lock(&ax25_uid_lock);
- for (ax25_uid = ax25_uid_list; ax25_uid != NULL; ax25_uid = ax25_uid->next) {
- if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
+ ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+ if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
break;
- }
}
if (ax25_uid == NULL) {
write_unlock(&ax25_uid_lock);
return -ENOENT;
}
- if ((s = ax25_uid_list) == ax25_uid) {
- ax25_uid_list = s->next;
- write_unlock(&ax25_uid_lock);
- kfree(ax25_uid);
- return 0;
- }
- while (s != NULL && s->next != NULL) {
- if (s->next == ax25_uid) {
- s->next = ax25_uid->next;
- write_unlock(&ax25_uid_lock);
- kfree(ax25_uid);
- return 0;
- }
- s = s->next;
- }
+ hlist_del_init(&ax25_uid->uid_node);
+ ax25_uid_put(ax25_uid);
write_unlock(&ax25_uid_lock);
- return -ENOENT;
+ return 0;
default:
return -EINVAL;
static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
{
struct ax25_uid_assoc *pt;
- int i = 1;
+ struct hlist_node *node;
+ int i = 0;
read_lock(&ax25_uid_lock);
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- for (pt = ax25_uid_list; pt != NULL; pt = pt->next) {
+ ax25_uid_for_each(pt, node, &ax25_uid_list) {
if (i == *pos)
return pt;
++i;
static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
- return (v == SEQ_START_TOKEN) ? ax25_uid_list :
- ((struct ax25_uid_assoc *) v)->next;
+
+ return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
+ ax25_uid_assoc, uid_node);
}
static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
seq_printf(seq, "Policy: %d\n", ax25_uid_policy);
else {
struct ax25_uid_assoc *pt = v;
-
seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(&pt->call));
}
*/
void __exit ax25_uid_free(void)
{
- ax25_uid_assoc *s, *ax25_uid;
+ ax25_uid_assoc *ax25_uid;
+ struct hlist_node *node;
write_lock(&ax25_uid_lock);
- ax25_uid = ax25_uid_list;
- while (ax25_uid != NULL) {
- s = ax25_uid;
- ax25_uid = ax25_uid->next;
-
- kfree(s);
+ ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+ hlist_del_init(&ax25_uid->uid_node);
+ ax25_uid_put(ax25_uid);
}
- ax25_uid_list = NULL;
write_unlock(&ax25_uid_lock);
}
struct softnet_data *queue = &__get_cpu_var(softnet_data);
unsigned long start_time = jiffies;
int budget = netdev_budget;
-
+ void *have;
+
local_irq_disable();
while (!list_empty(&queue->poll_list)) {
dev = list_entry(queue->poll_list.next,
struct net_device, poll_list);
- netpoll_poll_lock(dev);
+ have = netpoll_poll_lock(dev);
if (dev->quota <= 0 || dev->poll(dev, &budget)) {
- netpoll_poll_unlock(dev);
+ netpoll_poll_unlock(have);
local_irq_disable();
list_del(&dev->poll_list);
list_add_tail(&dev->poll_list, &queue->poll_list);
else
dev->quota = dev->weight;
} else {
- netpoll_poll_unlock(dev);
+ netpoll_poll_unlock(have);
dev_put(dev);
local_irq_disable();
}
#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000
static DEFINE_SPINLOCK(skb_list_lock);
static int nr_skbs;
int status;
struct netpoll_info *npinfo;
-repeat:
- if(!np || !np->dev || !netif_running(np->dev)) {
+ if (!np || !np->dev || !netif_running(np->dev)) {
__kfree_skb(skb);
return;
}
- /* avoid recursion */
npinfo = np->dev->npinfo;
+
+ /* avoid recursion */
if (npinfo->poll_owner == smp_processor_id() ||
np->dev->xmit_lock_owner == smp_processor_id()) {
if (np->drop)
return;
}
- spin_lock(&np->dev->xmit_lock);
- np->dev->xmit_lock_owner = smp_processor_id();
+ do {
+ npinfo->tries--;
+ spin_lock(&np->dev->xmit_lock);
+ np->dev->xmit_lock_owner = smp_processor_id();
- /*
- * network drivers do not expect to be called if the queue is
- * stopped.
- */
- if (netif_queue_stopped(np->dev)) {
+ /*
+ * network drivers do not expect to be called if the queue is
+ * stopped.
+ */
+ if (netif_queue_stopped(np->dev)) {
+ np->dev->xmit_lock_owner = -1;
+ spin_unlock(&np->dev->xmit_lock);
+ netpoll_poll(np);
+ udelay(50);
+ continue;
+ }
+
+ status = np->dev->hard_start_xmit(skb, np->dev);
np->dev->xmit_lock_owner = -1;
spin_unlock(&np->dev->xmit_lock);
- netpoll_poll(np);
- goto repeat;
- }
-
- status = np->dev->hard_start_xmit(skb, np->dev);
- np->dev->xmit_lock_owner = -1;
- spin_unlock(&np->dev->xmit_lock);
+ /* success */
+ if(!status) {
+ npinfo->tries = MAX_RETRIES; /* reset */
+ return;
+ }
- /* transmit busy */
- if(status) {
+ /* transmit busy */
netpoll_poll(np);
- goto repeat;
- }
+ udelay(50);
+ } while (npinfo->tries > 0);
}
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
unsigned char *arp_ptr;
int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
u32 sip, tip;
- unsigned long flags;
struct sk_buff *send_skb;
struct netpoll *np = NULL;
- spin_lock_irqsave(&npinfo->rx_lock, flags);
if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
np = npinfo->rx_np;
- spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
if (!np)
return;
if (!npinfo)
goto release;
+ npinfo->rx_flags = 0;
npinfo->rx_np = NULL;
npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
npinfo->poll_owner = -1;
+ npinfo->tries = MAX_RETRIES;
npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
} else
npinfo = ndev->npinfo;
npinfo->rx_np = np;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
+
+ /* fill up the skb queue */
+ refill_skbs();
+
/* last thing to do is link it to the net device structure */
ndev->npinfo = npinfo;
+ /* avoid racing with NAPI reading npinfo */
+ synchronize_rcu();
+
return 0;
release:
__neigh_parms_put(neigh->parms);
neigh->parms = neigh_parms_clone(parms);
- rcu_read_unlock();
if (dn_db->use_long)
neigh->ops = &dn_long_ops;
else
neigh->ops = &dn_short_ops;
+ rcu_read_unlock();
if (dn->flags & DN_NDFLAG_P3)
neigh->ops = &dn_phase3_ops;
}
static inline int check_leaf(struct trie *t, struct leaf *l, t_key key, int *plen, const struct flowi *flp,
- struct fib_result *res, int *err)
+ struct fib_result *res)
{
- int i;
+ int err, i;
t_key mask;
struct leaf_info *li;
struct hlist_head *hhead = &l->list;
if (l->key != (key & mask))
continue;
- if (((*err) = fib_semantic_match(&li->falh, flp, res, l->key, mask, i)) == 0) {
+ if ((err = fib_semantic_match(&li->falh, flp, res, l->key, mask, i)) <= 0) {
*plen = i;
#ifdef CONFIG_IP_FIB_TRIE_STATS
t->stats.semantic_match_passed++;
#endif
- return 1;
+ return err;
}
#ifdef CONFIG_IP_FIB_TRIE_STATS
t->stats.semantic_match_miss++;
#endif
}
- return 0;
+ return 1;
}
static int
/* Just a leaf? */
if (IS_LEAF(n)) {
- if (check_leaf(t, (struct leaf *)n, key, &plen, flp, res, &ret))
+ if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
goto found;
goto failed;
}
continue;
}
if (IS_LEAF(n)) {
- if (check_leaf(t, (struct leaf *)n, key, &plen, flp, res, &ret))
+ if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
goto found;
}
backtrace:
{
struct sk_buff *skb;
- ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
- icmp_param->data_len+icmp_param->head_len,
- icmp_param->head_len,
- ipc, rt, MSG_DONTWAIT);
-
- if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
+ if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
+ icmp_param->data_len+icmp_param->head_len,
+ icmp_param->head_len,
+ ipc, rt, MSG_DONTWAIT) < 0)
+ ip_flush_pending_frames(icmp_socket->sk);
+ else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
struct icmphdr *icmph = skb->h.icmph;
unsigned int csum = 0;
struct sk_buff *skb1;
/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
* interval depending on the total number of entries (more entries,
* less interval). */
- peer_periodic_timer.expires = jiffies
- + inet_peer_gc_maxtime
- - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
- peer_total / inet_peer_threshold * HZ;
+ if (peer_total >= inet_peer_threshold)
+ peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
+ else
+ peer_periodic_timer.expires = jiffies
+ + inet_peer_gc_maxtime
+ - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
+ peer_total / inet_peer_threshold * HZ;
add_timer(&peer_periodic_timer);
}
int cpu;
/* This can be any valid CPU ID so we don't need locking. */
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
list_for_each_entry(pos, &ipcomp_tfms_list, list) {
struct crypto_tfm *tfm;
break;
case IPQ_COPY_PACKET:
+ if (entry->skb->ip_summed == CHECKSUM_HW &&
+ (*errp = skb_checksum_help(entry->skb,
+ entry->info->outdev == NULL))) {
+ read_unlock_bh(&queue_lock);
+ return NULL;
+ }
if (copy_range == 0 || copy_range > entry->skb->len)
data_len = entry->skb->len;
else
if (!skb_ip_make_writable(&e->skb, v->data_len))
return -ENOMEM;
memcpy(e->skb->data, v->payload, v->data_len);
+ e->skb->ip_summed = CHECKSUM_NONE;
e->skb->nfcache |= NFC_ALTERED;
/*
if (!tcph)
return 0;
- if (!(einfo->operation & IPT_ECN_OP_SET_ECE
- || tcph->ece == einfo->proto.tcp.ece)
- && (!(einfo->operation & IPT_ECN_OP_SET_CWR
- || tcph->cwr == einfo->proto.tcp.cwr)))
+ if ((!(einfo->operation & IPT_ECN_OP_SET_ECE) ||
+ tcph->ece == einfo->proto.tcp.ece) &&
+ ((!(einfo->operation & IPT_ECN_OP_SET_CWR) ||
+ tcph->cwr == einfo->proto.tcp.cwr)))
return 1;
if (!skb_ip_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph)))
return 0;
tcph = (void *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl*4;
+ if ((*pskb)->ip_summed == CHECKSUM_HW &&
+ skb_checksum_help(*pskb, inward))
+ return 0;
+
diffs[0] = ((u_int16_t *)tcph)[6];
if (einfo->operation & IPT_ECN_OP_SET_ECE)
tcph->ece = einfo->proto.tcp.ece;
diffs[1] = ((u_int16_t *)tcph)[6];
diffs[0] = diffs[0] ^ 0xFFFF;
- if ((*pskb)->ip_summed != CHECKSUM_HW)
+ if ((*pskb)->ip_summed != CHECKSUM_UNNECESSARY)
tcph->check = csum_fold(csum_partial((char *)diffs,
sizeof(diffs),
tcph->check^0xFFFF));
- else
- if (skb_checksum_help(*pskb, inward))
- return 0;
(*pskb)->nfcache |= NFC_ALTERED;
return 1;
}
if (!skb_ip_make_writable(pskb, (*pskb)->len))
return NF_DROP;
+ if ((*pskb)->ip_summed == CHECKSUM_HW &&
+ skb_checksum_help(*pskb, out == NULL))
+ return NF_DROP;
+
iph = (*pskb)->nh.iph;
tcplen = (*pskb)->len - iph->ihl*4;
newmss);
retmodified:
- /* We never hw checksum SYN packets. */
- BUG_ON((*pskb)->ip_summed == CHECKSUM_HW);
-
(*pskb)->nfcache |= NFC_UNKNOWN | NFC_ALTERED;
return IPT_CONTINUE;
}
sk_charge_skb(sk, skb);
if (!sk->sk_send_head)
sk->sk_send_head = skb;
- else if (tp->nonagle&TCP_NAGLE_PUSH)
+ if (tp->nonagle & TCP_NAGLE_PUSH)
tp->nonagle &= ~TCP_NAGLE_PUSH;
}
tcp_port_rover = rover;
spin_unlock(&tcp_portalloc_lock);
- /* Exhausted local port range during search? */
+ /* Exhausted local port range during search? It is not
+ * possible for us to be holding one of the bind hash
+ * locks if this test triggers, because if 'remaining'
+ * drops to zero, we broke out of the do/while loop at
+ * the top level, not from the 'break;' statement.
+ */
ret = 1;
- if (remaining <= 0)
+ if (unlikely(remaining <= 0))
goto fail;
/* OK, here is the one we will use. HEAD is
u16 flags;
/* All of a TSO frame must be composed of paged data. */
- BUG_ON(skb->len != skb->data_len);
+ if (skb->len != skb->data_len)
+ return tcp_fragment(sk, skb, len, mss_now);
buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
if (unlikely(buff == NULL))
limit = min(send_win, cong_win);
- /* If sk_send_head can be sent fully now, just do it. */
- if (skb->len <= limit)
- return 0;
-
if (sysctl_tcp_tso_win_divisor) {
u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
sent_pkts = 0;
while ((skb = sk->sk_send_head)) {
+ unsigned int limit;
+
tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
BUG_ON(!tso_segs);
break;
}
+ limit = mss_now;
if (tso_segs > 1) {
- u32 limit = tcp_window_allows(tp, skb,
- mss_now, cwnd_quota);
+ limit = tcp_window_allows(tp, skb,
+ mss_now, cwnd_quota);
if (skb->len < limit) {
unsigned int trim = skb->len % mss_now;
if (trim)
limit = skb->len - trim;
}
- if (skb->len > limit) {
- if (tso_fragment(sk, skb, limit, mss_now))
- break;
- }
- } else if (unlikely(skb->len > mss_now)) {
- if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
- break;
}
+ if (skb->len > limit &&
+ unlikely(tso_fragment(sk, skb, limit, mss_now)))
+ break;
+
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (unlikely(tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC))))
cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
if (likely(cwnd_quota)) {
+ unsigned int limit;
+
BUG_ON(!tso_segs);
+ limit = mss_now;
if (tso_segs > 1) {
- u32 limit = tcp_window_allows(tp, skb,
- mss_now, cwnd_quota);
+ limit = tcp_window_allows(tp, skb,
+ mss_now, cwnd_quota);
if (skb->len < limit) {
unsigned int trim = skb->len % mss_now;
if (trim)
limit = skb->len - trim;
}
- if (skb->len > limit) {
- if (unlikely(tso_fragment(sk, skb, limit, mss_now)))
- return;
- }
- } else if (unlikely(skb->len > mss_now)) {
- if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
- return;
}
+ if (skb->len > limit &&
+ unlikely(tso_fragment(sk, skb, limit, mss_now)))
+ return;
+
/* Send it out now. */
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (!raw_sk) {
if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
- icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
+ icmpv6_send(skb, ICMPV6_PARAMPROB,
+ ICMPV6_UNK_NEXTHDR, nhoff,
+ skb->dev);
}
- } else {
+ } else
IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
- kfree_skb(skb);
- }
+ kfree_skb(skb);
}
rcu_read_unlock();
return 0;
int cpu;
/* This can be any valid CPU ID so we don't need locking. */
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
list_for_each_entry(pos, &ipcomp6_tfms_list, list) {
struct crypto_tfm *tfm;
break;
case IPQ_COPY_PACKET:
+ if (entry->skb->ip_summed == CHECKSUM_HW &&
+ (*errp = skb_checksum_help(entry->skb,
+ entry->info->outdev == NULL))) {
+ read_unlock_bh(&queue_lock);
+ return NULL;
+ }
if (copy_range == 0 || copy_range > entry->skb->len)
data_len = entry->skb->len;
else
if (!skb_ip_make_writable(&e->skb, v->data_len))
return -ENOMEM;
memcpy(e->skb->data, v->payload, v->data_len);
+ e->skb->ip_summed = CHECKSUM_NONE;
e->skb->nfcache |= NFC_ALTERED;
/*
if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
if (skb->ip_summed == CHECKSUM_HW) {
+ skb_postpull_rcsum(skb, skb->nh.raw,
+ skb->h.raw - skb->nh.raw);
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
&skb->nh.ipv6h->daddr,
tcp_port_rover = rover;
spin_unlock(&tcp_portalloc_lock);
- /* Exhausted local port range during search? */
+ /* Exhausted local port range during search? It is not
+ * possible for us to be holding one of the bind hash
+ * locks if this test triggers, because if 'remaining'
+ * drops to zero, we broke out of the do/while loop at
+ * the top level, not from the 'break;' statement.
+ */
ret = 1;
- if (remaining <= 0)
+ if (unlikely(remaining <= 0))
goto fail;
/* OK, here is the one we will use. */
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sk->sk_sleep = osk->sk_sleep;
-
- if (sock_flag(osk, SOCK_ZAPPED))
- sock_set_flag(sk, SOCK_ZAPPED);
-
- if (sock_flag(osk, SOCK_DBG))
- sock_set_flag(sk, SOCK_DBG);
+ sock_copy_flags(sk, osk);
skb_queue_head_init(&nr->ack_queue);
skb_queue_head_init(&nr->reseq_queue);
struct nr_sock *nr = nr_sk(sk);
struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
struct net_device *dev;
- ax25_address *user, *source;
+ ax25_uid_assoc *user;
+ ax25_address *source;
lock_sock(sk);
if (!sock_flag(sk, SOCK_ZAPPED)) {
} else {
source = &addr->fsa_ax25.sax25_call;
- if ((user = ax25_findbyuid(current->euid)) == NULL) {
+ user = ax25_findbyuid(current->euid);
+ if (user) {
+ nr->user_addr = user->call;
+ ax25_uid_put(user);
+ } else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
release_sock(sk);
dev_put(dev);
return -EPERM;
}
- user = source;
+ nr->user_addr = *source;
}
- nr->user_addr = *user;
nr->source_addr = *source;
}
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct sockaddr_ax25 *addr = (struct sockaddr_ax25 *)uaddr;
- ax25_address *user, *source = NULL;
+ ax25_address *source = NULL;
+ ax25_uid_assoc *user;
struct net_device *dev;
lock_sock(sk);
}
source = (ax25_address *)dev->dev_addr;
- if ((user = ax25_findbyuid(current->euid)) == NULL) {
+ user = ax25_findbyuid(current->euid);
+ if (user) {
+ nr->user_addr = user->call;
+ ax25_uid_put(user);
+ } else {
if (ax25_uid_policy && !capable(CAP_NET_ADMIN)) {
dev_put(dev);
release_sock(sk);
return -EPERM;
}
- user = source;
+ nr->user_addr = *source;
}
- nr->user_addr = *user;
nr->source_addr = *source;
nr->device = dev;
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sk->sk_sleep = osk->sk_sleep;
-
- if (sock_flag(osk, SOCK_ZAPPED))
- sock_set_flag(sk, SOCK_ZAPPED);
-
- if (sock_flag(osk, SOCK_DBG))
- sock_set_flag(sk, SOCK_DBG);
+ sock_copy_flags(sk, osk);
init_timer(&rose->timer);
init_timer(&rose->idletimer);
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
struct net_device *dev;
- ax25_address *user, *source;
+ ax25_address *source;
+ ax25_uid_assoc *user;
int n;
if (!sock_flag(sk, SOCK_ZAPPED))
source = &addr->srose_call;
- if ((user = ax25_findbyuid(current->euid)) == NULL) {
+ user = ax25_findbyuid(current->euid);
+ if (user) {
+ rose->source_call = user->call;
+ ax25_uid_put(user);
+ } else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
return -EACCES;
- user = source;
+ rose->source_call = *source;
}
rose->source_addr = addr->srose_addr;
- rose->source_call = *user;
rose->device = dev;
rose->source_ndigis = addr->srose_ndigis;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
unsigned char cause, diagnostic;
- ax25_address *user;
struct net_device *dev;
+ ax25_uid_assoc *user;
int n;
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
if ((dev = rose_dev_first()) == NULL)
return -ENETUNREACH;
- if ((user = ax25_findbyuid(current->euid)) == NULL)
+ user = ax25_findbyuid(current->euid);
+ if (!user)
return -EINVAL;
memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
- rose->source_call = *user;
+ rose->source_call = user->call;
rose->device = dev;
+ ax25_uid_put(user);
rose_insert_socket(sk); /* Finish the bind */
}
* 1. The frame isn't for us,
* 2. It isn't "owned" by any existing route.
*/
- if (frametype != ROSE_CALL_REQUEST) /* XXX */
- return 0;
+ if (frametype != ROSE_CALL_REQUEST) { /* XXX */
+ res = 0;
+ goto out;
+ }
len = (((skb->data[3] >> 4) & 0x0F) + 1) / 2;
len += (((skb->data[3] >> 0) & 0x0F) + 1) / 2;
if (!ops->init || ops->init(sch, NULL) == 0)
return sch;
+ qdisc_destroy(sch);
errout:
return NULL;
}
SNMP_MIB_ITEM("SctpReasmUsrMsgs", SCTP_MIB_REASMUSRMSGS),
SNMP_MIB_ITEM("SctpOutSCTPPacks", SCTP_MIB_OUTSCTPPACKS),
SNMP_MIB_ITEM("SctpInSCTPPacks", SCTP_MIB_INSCTPPACKS),
+ SNMP_MIB_SENTINEL
};
/* Return the current value of a particular entry in the mib by adding its
sg->page = body->pages[i];
sg->offset = offset;
sg->length = thislen;
- kmap(sg->page); /* XXX kmap_atomic? */
crypto_digest_update(tfm, sg, 1);
- kunmap(sg->page);
len -= thislen;
i++;
offset = 0;
return -EINVAL;
} else {
if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
+ desc->array_len > desc->array_maxlen ||
(unsigned long) base + 4 + desc->array_len *
desc->elem_size > buf->len)
return -EINVAL;
/* ignore __this_module, it will be resolved shortly */
if (strcmp(symname, MODULE_SYMBOL_PREFIX "__this_module") == 0)
break;
-#ifdef STT_REGISTER
+/* cope with newer glibc (2.3.4 or higher) STT_ definition in elf.h */
+#if defined(STT_REGISTER) || defined(STT_SPARC_REGISTER)
+/* add compatibility with older glibc */
+#ifndef STT_SPARC_REGISTER
+#define STT_SPARC_REGISTER STT_REGISTER
+#endif
if (info->hdr->e_machine == EM_SPARC ||
info->hdr->e_machine == EM_SPARCV9) {
/* Ignore register directives. */
- if (ELF_ST_TYPE(sym->st_info) == STT_REGISTER)
+ if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER)
break;
}
#endif
endmenu
menu "Open Sound System"
- depends on SOUND!=n && (BROKEN || (!SPARC32 && !SPARC64))
+ depends on SOUND!=n
config SOUND_PRIME
tristate "Open Sound System (DEPRECATED)"
snd-objs := sound.o init.o memory.o info.o control.o misc.o \
device.o wrappers.o
-ifeq ($(CONFIG_ISA),y)
+ifeq ($(CONFIG_ISA_DMA_API),y)
snd-objs += isadma.o
endif
ifeq ($(CONFIG_SND_OSSEMUL),y)
EXPORT_SYMBOL(snd_device_register);
EXPORT_SYMBOL(snd_device_free);
/* isadma.c */
-#ifdef CONFIG_ISA
+#ifdef CONFIG_ISA_DMA_API
EXPORT_SYMBOL(snd_dma_program);
EXPORT_SYMBOL(snd_dma_disable);
EXPORT_SYMBOL(snd_dma_pointer);
# ALSA ISA drivers
menu "ISA devices"
- depends on SND!=n && ISA
+ depends on SND!=n && ISA && ISA_DMA_API
config SND_AD1848_LIB
tristate
# Prompt user for primary drivers.
config SOUND_BT878
tristate "BT878 audio dma"
- depends on SOUND_PRIME
+ depends on SOUND_PRIME && PCI
---help---
Audio DMA support for bt878 based grabber boards. As you might have
already noticed, bt878 is listed with two functions in /proc/pci.
config MIDI_EMU10K1
bool "Creative SBLive! MIDI (EXPERIMENTAL)"
- depends on SOUND_EMU10K1 && EXPERIMENTAL
+ depends on SOUND_EMU10K1 && EXPERIMENTAL && ISA_DMA_API
help
Say Y if you want to be able to use the OSS /dev/sequencer
interface. This code is still experimental.
config SOUND_FUSION
tristate "Crystal SoundFusion (CS4280/461x)"
- depends on SOUND_PRIME
+ depends on SOUND_PRIME && PCI
help
This module drives the Crystal SoundFusion devices (CS4280/46xx
series) when wired as native sound drivers with AC97 codecs. If
config SOUND_CS4281
tristate "Crystal Sound CS4281"
- depends on SOUND_PRIME
+ depends on SOUND_PRIME && PCI
help
Picture and feature list at
<http://www.pcbroker.com/crystal4281.html>.
config SOUND_SONICVIBES
tristate "S3 SonicVibes"
- depends on SOUND_PRIME
+ depends on SOUND_PRIME && PCI
help
Say Y or M if you have a PCI sound card utilizing the S3
SonicVibes chipset. To find out if your sound card uses a
config SOUND_TRIDENT
tristate "Trident 4DWave DX/NX, SiS 7018 or ALi 5451 PCI Audio Core"
- depends on SOUND_PRIME
+ depends on SOUND_PRIME && PCI
---help---
Say Y or M if you have a PCI sound card utilizing the Trident
4DWave-DX/NX chipset or your mother board chipset has SiS 7018
config MIDI_VIA82CXXX
bool "VIA 82C686 MIDI"
- depends on SOUND_VIA82CXXX
+ depends on SOUND_VIA82CXXX && ISA_DMA_API
help
Answer Y to use the MIDI interface of the Via686. You may need to
enable this in the BIOS before it will work. This is for connection
config SOUND_OSS
tristate "OSS sound modules"
- depends on SOUND_PRIME
+ depends on SOUND_PRIME && ISA_DMA_API
help
OSS is the Open Sound System suite of sound card drivers. They make
sound programming easier since they provide a common API. Say Y or
obj-$(CONFIG_SOUND_IT8172) += ite8172.o ac97_codec.o
obj-$(CONFIG_SOUND_FORTE) += forte.o ac97_codec.o
-obj-$(CONFIG_SOUND_AD1980) += ac97_plugin_ad1980.o
+obj-$(CONFIG_SOUND_AD1980) += ac97_plugin_ad1980.o ac97_codec.o
obj-$(CONFIG_SOUND_WM97XX) += ac97_plugin_wm97xx.o
ifeq ($(CONFIG_MIDI_EMU10K1),y)
release_mem_region(card->iobase_mmio_phys, 256);
}
out_pio:
- release_region(card->iobase, 64);
-out_region2:
release_region(card->ac97base, 256);
+out_region2:
+ release_region(card->iobase, 64);
out_region1:
pci_free_consistent(pci_dev, sizeof(struct i810_channel)*NR_HW_CH,
card->channel, card->chandma);
* VIDC sound function prototypes
*/
-/* vidc.c */
-
-extern int vidc_busy;
-
/* vidc_fill.S */
/*
config SND_ALS4000
tristate "Avance Logic ALS4000"
- depends on SND
+ depends on SND && ISA_DMA_API
select SND_OPL3_LIB
select SND_MPU401_UART
select SND_PCM
*/
static void snd_pmac_sound_feature(pmac_t *chip, int enable)
{
- ppc_md.feature_call(PMAC_FTR_SOUND_CHIP_ENABLE, chip->node, 0, enable);
+ if (ppc_md.feature_call)
+ ppc_md.feature_call(PMAC_FTR_SOUND_CHIP_ENABLE, chip->node, 0, enable);
}
/*