3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
11 #include <linux/stddef.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/bootmem.h>
15 #include <asm/system.h>
17 #include <asm/iSeries/ItLpQueue.h>
18 #include <asm/iSeries/HvLpEvent.h>
19 #include <asm/iSeries/HvCallEvent.h>
/*
 * set_inUse - atomically claim exclusive use of the LP event queue.
 * Operates on xItLpQueue.xInUseWord via inline assembly; the operand
 * lists below suggest a lwarx/stwcx.-style load-reserve/store-conditional
 * sequence — NOTE(review): the asm body itself is not visible in this
 * chunk, so confirm the exact sequence (and the meaning of the int
 * return value) against the full file.
 */
21 static __inline__ int set_inUse(void)
24 u32 * inUseP = &xItLpQueue.xInUseWord;
/* Output operands: t = scratch/result register, plus the in-use word
 * itself as a memory output; inputs: its address and current value. */
26 __asm__ __volatile__("\n\
35 : "=&r" (t), "=m" (xItLpQueue.xInUseWord)
36 : "r" (inUseP), "m" (xItLpQueue.xInUseWord)
/*
 * clear_inUse - release the queue claimed by set_inUse() by zeroing the
 * in-use word.  A plain store (no barrier visible in this chunk) —
 * NOTE(review): verify ordering requirements against the full file.
 */
42 static __inline__ void clear_inUse(void)
44 xItLpQueue.xInUseWord = 0;
47 /* Array of LpEvent handler functions */
48 extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
/* Nonzero while ItLpQueue_process() is active; acts as a recursion
 * guard — set and cleared inside ItLpQueue_process() below. */
49 unsigned long ItLpQueueInProcess = 0;
/*
 * ItLpQueue_getNextLpEvent - fetch the event at the current slot of the
 * shared (SLIC) event stack.  If that slot holds a valid event, the
 * current-event pointer is advanced past it (rounded up by the event's
 * size), wrapping back to the start of the stack when the end is
 * reached.  NOTE(review): the return statement and the not-valid branch
 * are not visible in this chunk — presumably NULL is returned when
 * xValid is clear; confirm against the full file.
 */
51 struct HvLpEvent * ItLpQueue_getNextLpEvent(void)
53 struct HvLpEvent * nextLpEvent =
54 (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
55 if ( nextLpEvent->xFlags.xValid ) {
56 /* rmb() needed only for weakly consistent machines (regatta) */
58 /* Set pointer to next potential event */
59 xItLpQueue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
63 /* Wrap to beginning if no room at end */
64 if (xItLpQueue.xSlicCurEventPtr > xItLpQueue.xSlicLastValidEventPtr)
65 xItLpQueue.xSlicCurEventPtr = xItLpQueue.xSlicEventStackPtr;
/* Number of CPUs that take part in LP event processing; defaults to all
 * CPUs and is tunable via the "spread_lpevents=" boot parameter handled
 * by set_spread_lpevents() below. */
73 static unsigned long spread_lpevents = NR_CPUS;
/*
 * ItLpQueue_isLpIntPending - nonzero when work is pending: either the
 * event at the current queue slot is valid, or the hypervisor (PLIC)
 * has flagged an overflow interrupt.  CPUs numbered at or above
 * spread_lpevents opt out early — NOTE(review): the body of that
 * early-exit branch is not visible in this chunk; presumably it
 * returns 0, confirm against the full file.
 */
75 int ItLpQueue_isLpIntPending(void)
77 struct HvLpEvent *next_event;
79 if (smp_processor_id() >= spread_lpevents)
82 next_event = (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
83 return next_event->xFlags.xValid | xItLpQueue.xPlicOverflowIntPending;
/*
 * ItLpQueue_clearValid - mark a consumed event slot as free.
 * A large event spans several LpEventAlign-sized slots; each slot
 * boundary could be mistaken for the start of a valid event, so the
 * valid bit is cleared at every covered boundary (trailing slots first,
 * the event's own header last).  NOTE(review): the dispatch on `extra`
 * (likely a switch with fall-through) is on lines not visible in this
 * chunk — confirm against the full file.
 */
86 void ItLpQueue_clearValid( struct HvLpEvent * event )
88 /* Clear the valid bit of the event
89 * Also clear bits within this event that might
90 * look like valid bits (on 64-byte boundaries)
/* Number of extra LpEventAlign slots this event occupies beyond the
 * first (divisor continues on a line not shown here). */
92 unsigned extra = (( event->xSizeMinus1 + LpEventAlign ) /
96 ((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0;
98 ((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0;
100 ((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0;
/* Clear the event's own valid bit last so partially-cleared trailing
 * slots are never seen behind a still-valid header. */
105 event->xFlags.xValid = 0;
/*
 * ItLpQueue_process - drain pending LP events from the queue.
 * For each valid event: bump the per-queue and per-type counters,
 * dispatch to the registered handler (passing regs through), then clear
 * the slot's valid bits.  When no valid event remains but the
 * hypervisor reports overflow, pull the overflowed events back in via
 * HvCallEvent_getOverflowLpEvents().  Returns the number of events
 * handled, which is also accumulated into the paca's lpevent_count.
 * NOTE(review): the surrounding loop and early-return-on-recursion
 * control flow is partly on lines not visible in this chunk — confirm
 * against the full file.
 */
108 unsigned ItLpQueue_process(struct pt_regs *regs)
110 unsigned numIntsProcessed = 0;
111 struct HvLpEvent * nextLpEvent;
113 /* If we have recursed, just return */
/* Recursion guard: only the outermost caller sets the flag (and only
 * the outermost clears it below). */
117 if (ItLpQueueInProcess == 0)
118 ItLpQueueInProcess = 1;
123 nextLpEvent = ItLpQueue_getNextLpEvent();
125 /* Count events to return to caller
126 * and count processed events in xItLpQueue
129 xItLpQueue.xLpIntCount++;
130 /* Call appropriate handler here, passing
131 * a pointer to the LpEvent. The handler
132 * must make a copy of the LpEvent if it
133 * needs it in a bottom half. (perhaps for
136 * Handlers are responsible for ACK processing
138 * The Hypervisor guarantees that LpEvents will
139 * only be delivered with types that we have
140 * registered for, so no type check is necessary
143 if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
144 xItLpQueue.xLpIntCountByType[nextLpEvent->xType]++;
145 if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
146 lpEventHandler[nextLpEvent->xType] )
147 lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
149 printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
/* Free the slot(s) this event occupied so the hypervisor can reuse them. */
151 ItLpQueue_clearValid( nextLpEvent );
152 } else if ( xItLpQueue.xPlicOverflowIntPending )
154 * No more valid events. If overflow events are
155 * pending process them
157 HvCallEvent_getOverflowLpEvents( xItLpQueue.xIndex);
162 ItLpQueueInProcess = 0;
/* Accounting: fold this batch into the per-CPU paca event counter. */
166 get_paca()->lpevent_count += numIntsProcessed;
168 return numIntsProcessed;
/*
 * set_spread_lpevents - handler for the "spread_lpevents=" boot
 * parameter.  Accepts a value in 1..NR_CPUS (the number of processors
 * that share LP event processing) and stores it in spread_lpevents;
 * out-of-range values are rejected with a message.  NOTE(review): the
 * return statement is on a line not visible in this chunk.
 */
171 static int set_spread_lpevents(char *str)
173 unsigned long val = simple_strtoul(str, NULL, 0);
176 * The parameter is the number of processors to share in processing
179 if (( val > 0) && (val <= NR_CPUS)) {
180 spread_lpevents = val;
181 printk("lpevent processing spread over %ld processors\n", val);
183 printk("invalid spread_lpevents %ld\n", val);
188 __setup("spread_lpevents=", set_spread_lpevents);
/*
 * setup_hvlpevent_queue - boot-time initialization of the LP event
 * queue: allocate a zeroed event stack from bootmem, register it with
 * the hypervisor, and point the xItLpQueue cursors at it (current slot
 * at the start; last-valid slot placed so a maximum-size event still
 * fits before the end).  NOTE(review): the declaration of eventStack
 * and the address computation described in the comment below are on
 * lines not visible in this chunk — confirm against the full file.
 */
190 void setup_hvlpevent_queue(void)
195 * Allocate a page for the Event Stack. The Hypervisor needs the
196 * absolute real address, so we subtract out the KERNELBASE and add
197 * in the absolute real address of the kernel load area.
199 eventStack = alloc_bootmem_pages(LpEventStackSize);
200 memset(eventStack, 0, LpEventStackSize);
202 /* Invoke the hypervisor to initialize the event stack */
203 HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
205 xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
206 xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
/* Last slot at which a maximum-size event can still start in bounds. */
207 xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
208 (LpEventStackSize - LpEventMaxSize);
209 xItLpQueue.xIndex = 0;