/*
 * ItLpQueue.c
 * Copyright (C) 2001 Mike Corrigan IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/system.h>
#include <asm/paca.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvCallEvent.h>

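/*
 * Atomically claim the queue by setting xInUseWord with a
 * lwarx/stwcx. sequence.  Returns 1 if this caller changed the word
 * from 0 to 1, or 0 if the queue was already marked in use; the
 * trailing eieio is a storage ordering barrier after the update.
 */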
static __inline__ int set_inUse( struct ItLpQueue * lpQueue )
{
	int t;
	u32 * inUseP = &(lpQueue->xInUseWord);

	__asm__ __volatile__("\n\
1:	lwarx	%0,0,%2		\n\
	cmpwi	0,%0,0		\n\
	li	%0,0		\n\
	bne-	2f		\n\
	addi	%0,%0,1		\n\
	stwcx.	%0,0,%2		\n\
	bne-	1b		\n\
2:	eieio"
		: "=&r" (t), "=m" (lpQueue->xInUseWord)
		: "r" (inUseP), "m" (lpQueue->xInUseWord)
		: "cc");

	return t;
}

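/*
 * Release the queue.  A plain store is enough here because the caller
 * (ItLpQueue_process) issues mb() before clearing the in-use word.
 */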
static __inline__ void clear_inUse( struct ItLpQueue * lpQueue )
{
	lpQueue->xInUseWord = 0;
}

/* Array of LpEvent handler functions */
extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
unsigned long ItLpQueueInProcess = 0;

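/*
 * Return the event at xSlicCurEventPtr if its valid bit is set, and
 * advance the pointer past it (the event size is rounded up to a
 * multiple of LpEventAlign, wrapping back to the start of the event
 * stack when the end is passed).  Returns NULL if the next slot does
 * not hold a valid event.
 */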
struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
{
	struct HvLpEvent * nextLpEvent =
		(struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
	if ( nextLpEvent->xFlags.xValid ) {
		/* rmb() needed only for weakly consistent machines (regatta) */
		rmb();
		/* Set pointer to next potential event */
		lpQueue->xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
				      LpEventAlign ) /
				      LpEventAlign ) *
				      LpEventAlign;
		/* Wrap to beginning if no room at end */
		if (lpQueue->xSlicCurEventPtr > lpQueue->xSlicLastValidEventPtr)
			lpQueue->xSlicCurEventPtr = lpQueue->xSlicEventStackPtr;
	}
	else
		nextLpEvent = NULL;

	return nextLpEvent;
}

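/*
 * Number of processors allowed to poll for and process lp events.
 * Defaults to all CPUs; it can be lowered with the "spread_lpevents="
 * boot parameter handled by set_spread_lpevents() below.
 */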
static unsigned long spread_lpevents = NR_CPUS;

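/*
 * Nonzero when the next slot in the queue holds a valid event or the
 * hypervisor has flagged an overflow.  CPUs at or above the
 * spread_lpevents limit always report no work pending.
 */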
int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
{
	struct HvLpEvent *next_event;

	if (smp_processor_id() >= spread_lpevents)
		return 0;

	next_event = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
	return next_event->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
}

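/*
 * Note: the switch cases below fall through intentionally, so every
 * trailing 64-byte block of a multi-block event has its potential
 * valid bit cleared before mb() and the clearing of the event's real
 * valid bit.
 */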
void ItLpQueue_clearValid( struct HvLpEvent * event )
{
	/* Clear the valid bit of the event
	 * Also clear bits within this event that might
	 * look like valid bits (on 64-byte boundaries)
	 */
	unsigned extra = (( event->xSizeMinus1 + LpEventAlign ) /
						 LpEventAlign ) - 1;
	switch ( extra ) {
	  case 3:
	   ((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0;
	  case 2:
	   ((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0;
	  case 1:
	   ((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0;
	  case 0:
	   ;
	}
	mb();
	event->xFlags.xValid = 0;
}

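/*
 * Main event loop: claim the queue, then repeatedly dispatch the next
 * valid event to its registered handler.  When no valid event remains,
 * any pending overflow events are pulled back in from the hypervisor;
 * the loop exits once neither is left.  Returns the number of events
 * processed by this call.
 */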
unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
{
	unsigned numIntsProcessed = 0;
	struct HvLpEvent * nextLpEvent;

	/* If we have recursed, just return */
	if ( !set_inUse( lpQueue ) )
		return 0;

	if (ItLpQueueInProcess == 0)
		ItLpQueueInProcess = 1;
	else
		BUG();

	for (;;) {
		nextLpEvent = ItLpQueue_getNextLpEvent( lpQueue );
		if ( nextLpEvent ) {
			/* Count events to return to caller
			 * and count processed events in lpQueue
			 */
			++numIntsProcessed;
			lpQueue->xLpIntCount++;
			/* Call appropriate handler here, passing
			 * a pointer to the LpEvent.  The handler
			 * must make a copy of the LpEvent if it
			 * needs it in a bottom half. (perhaps for
			 * an ACK)
			 *
			 * Handlers are responsible for ACK processing
			 *
			 * The Hypervisor guarantees that LpEvents will
			 * only be delivered with types that we have
			 * registered for, so no type check is necessary
			 * here!
			 */
			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
				lpQueue->xLpIntCountByType[nextLpEvent->xType]++;
			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
			     lpEventHandler[nextLpEvent->xType] )
				lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
			else
				printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );

			ItLpQueue_clearValid( nextLpEvent );
		} else if ( lpQueue->xPlicOverflowIntPending )
			/*
			 * No more valid events. If overflow events are
			 * pending process them
			 */
			HvCallEvent_getOverflowLpEvents( lpQueue->xIndex);
		else
			break;
	}

	ItLpQueueInProcess = 0;
	mb();
	clear_inUse( lpQueue );

	get_paca()->lpevent_count += numIntsProcessed;

	return numIntsProcessed;
}

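/*
 * Handler for the "spread_lpevents=" boot parameter, e.g. booting with
 * "spread_lpevents=2" limits lp event processing to the first two
 * processors.  Values outside 1..NR_CPUS are rejected.
 */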
static int set_spread_lpevents(char *str)
{
	unsigned long val = simple_strtoul(str, NULL, 0);

	/*
	 * The parameter is the number of processors to share in processing
	 * lp events.
	 */
	if (( val > 0) && (val <= NR_CPUS)) {
		spread_lpevents = val;
		printk("lpevent processing spread over %ld processors\n", val);
	} else {
		printk("invalid spread_lpevents %ld\n", val);
	}

	return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);