/*
 *  arch/s390/mm/cmm.c
 *
 *  S390 version
 *    Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Collaborative memory management interface.
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>

static char *sender = "VMRMSVM";
module_param(sender, charp, 0400);
MODULE_PARM_DESC(sender,
                 "Guest name that may send SMSG messages (default VMRMSVM)");

#include "../../../drivers/s390/net/smsgiucv.h"

#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)

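/*
 * One node of the page list: each cmm_page_array occupies exactly one
 * page and stores the addresses of up to CMM_NR_PAGES pages that have
 * been taken from the kernel and reported to the hypervisor.
 */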
struct cmm_page_array {
        struct cmm_page_array *next;
        unsigned long index;
        unsigned long pages[CMM_NR_PAGES];
};

static long cmm_pages = 0;
static long cmm_timed_pages = 0;
static volatile long cmm_pages_target = 0;
static volatile long cmm_timed_pages_target = 0;
static long cmm_timeout_pages = 0;
static long cmm_timeout_seconds = 0;

static struct cmm_page_array *cmm_page_list = NULL;
static struct cmm_page_array *cmm_timed_page_list = NULL;

static unsigned long cmm_thread_active = 0;
static struct work_struct cmm_thread_starter;
static wait_queue_head_t cmm_thread_wait;
static struct timer_list cmm_timer;

static void cmm_timer_fn(unsigned long);
static void cmm_set_timer(void);

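/*
 * Take "pages" pages out of the kernel with GFP_NOIO, report each one
 * to the hypervisor via diag10() and remember it on *list. Returns the
 * number of pages that could not be allocated.
 */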
static long
cmm_alloc_pages(long pages, long *counter, struct cmm_page_array **list)
{
        struct cmm_page_array *pa;
        unsigned long page;

        pa = *list;
        while (pages) {
                page = __get_free_page(GFP_NOIO);
                if (!page)
                        break;
                if (!pa || pa->index >= CMM_NR_PAGES) {
                        /* Need a new page for the page list. */
                        pa = (struct cmm_page_array *)
                                __get_free_page(GFP_NOIO);
                        if (!pa) {
                                free_page(page);
                                break;
                        }
                        pa->next = *list;
                        pa->index = 0;
                        *list = pa;
                }
                diag10(page);
                pa->pages[pa->index++] = page;
                (*counter)++;
                pages--;
        }
        return pages;
}

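/*
 * Give "pages" pages back to the kernel: pop their addresses off *list,
 * free them and free list nodes that become empty.
 */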
static void
cmm_free_pages(long pages, long *counter, struct cmm_page_array **list)
{
        struct cmm_page_array *pa;
        unsigned long page;

        pa = *list;
        while (pages) {
                if (!pa || pa->index <= 0)
                        break;
                page = pa->pages[--pa->index];
                if (pa->index == 0) {
                        pa = pa->next;
                        free_page((unsigned long) *list);
                        *list = pa;
                }
                free_page(page);
                (*counter)--;
                pages--;
        }
}

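/*
 * Worker thread: adjust cmm_pages and cmm_timed_pages towards their
 * targets one page at a time until a kill signal stops the thread.
 */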
static int
cmm_thread(void *dummy)
{
        int rc;

        daemonize("cmmthread");
        while (1) {
                rc = wait_event_interruptible(cmm_thread_wait,
                        (cmm_pages != cmm_pages_target ||
                         cmm_timed_pages != cmm_timed_pages_target));
                if (rc == -ERESTARTSYS) {
                        /* Got kill signal. End thread. */
                        clear_bit(0, &cmm_thread_active);
                        cmm_pages_target = cmm_pages;
                        cmm_timed_pages_target = cmm_timed_pages;
                        break;
                }
                if (cmm_pages_target > cmm_pages) {
                        if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
                                cmm_pages_target = cmm_pages;
                } else if (cmm_pages_target < cmm_pages) {
                        cmm_free_pages(1, &cmm_pages, &cmm_page_list);
                }
                if (cmm_timed_pages_target > cmm_timed_pages) {
                        if (cmm_alloc_pages(1, &cmm_timed_pages,
                                            &cmm_timed_page_list))
                                cmm_timed_pages_target = cmm_timed_pages;
                } else if (cmm_timed_pages_target < cmm_timed_pages) {
                        cmm_free_pages(1, &cmm_timed_pages,
                                       &cmm_timed_page_list);
                }
                if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
                        cmm_set_timer();
        }
        return 0;
}

static void
cmm_start_thread(void)
{
        kernel_thread(cmm_thread, NULL, 0);
}

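/* Start the worker thread if it is not running yet and wake it up. */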
static void
cmm_kick_thread(void)
{
        if (!test_and_set_bit(0, &cmm_thread_active))
                schedule_work(&cmm_thread_starter);
        wake_up(&cmm_thread_wait);
}

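/*
 * (Re)arm cmm_timer with cmm_timeout_seconds, or delete it if there is
 * no timed page target or no timeout configured.
 */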
static void
cmm_set_timer(void)
{
        if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
                if (timer_pending(&cmm_timer))
                        del_timer(&cmm_timer);
                return;
        }
        if (timer_pending(&cmm_timer)) {
                if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
                        return;
        }
        cmm_timer.function = cmm_timer_fn;
        cmm_timer.data = 0;
        cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
        add_timer(&cmm_timer);
}

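/*
 * Timer expiry: reduce the timed page target by cmm_timeout_pages,
 * kick the worker thread and rearm the timer.
 */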
static void
cmm_timer_fn(unsigned long ignored)
{
        long pages;

        pages = cmm_timed_pages_target - cmm_timeout_pages;
        if (pages < 0)
                cmm_timed_pages_target = 0;
        else
                cmm_timed_pages_target = pages;
        cmm_kick_thread();
        cmm_set_timer();
}

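/*
 * External interface, used by the sysctl handlers and the SMSG parser
 * below and exported for other modules.
 */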
void
cmm_set_pages(long pages)
{
        cmm_pages_target = pages;
        cmm_kick_thread();
}

long
cmm_get_pages(void)
{
        return cmm_pages;
}

void
cmm_add_timed_pages(long pages)
{
        cmm_timed_pages_target += pages;
        cmm_kick_thread();
}

long
cmm_get_timed_pages(void)
{
        return cmm_timed_pages;
}

void
cmm_set_timeout(long pages, long seconds)
{
        cmm_timeout_pages = pages;
        cmm_timeout_seconds = seconds;
        cmm_set_timer();
}

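/* Skip spaces and tabs; return non-zero if anything was skipped. */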
static inline int
cmm_skip_blanks(char *cp, char **endp)
{
        char *str;

        for (str = cp; *str == ' ' || *str == '\t'; str++);
        *endp = str;
        return str != cp;
}

#ifdef CONFIG_CMM_PROC
/* These will someday get removed. */
#define VM_CMM_PAGES 1111
#define VM_CMM_TIMED_PAGES 1112
#define VM_CMM_TIMEOUT 1113

static struct ctl_table cmm_table[];

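/*
 * sysctl handler shared by vm.cmm_pages and vm.cmm_timed_pages: a write
 * sets the static page target or adds timed pages, a read reports the
 * current count.
 */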
static int
cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        char buf[16], *p;
        long pages;
        int len;

        if (!*lenp || (*ppos && !write)) {
                *lenp = 0;
                return 0;
        }

        if (write) {
                len = *lenp;
                if (copy_from_user(buf, buffer,
                                   len > sizeof(buf) ? sizeof(buf) : len))
                        return -EFAULT;
                buf[sizeof(buf) - 1] = '\0';
                cmm_skip_blanks(buf, &p);
                pages = simple_strtoul(p, &p, 0);
                if (ctl == &cmm_table[0])
                        cmm_set_pages(pages);
                else
                        cmm_add_timed_pages(pages);
        } else {
                if (ctl == &cmm_table[0])
                        pages = cmm_get_pages();
                else
                        pages = cmm_get_timed_pages();
                len = sprintf(buf, "%ld\n", pages);
                if (len > *lenp)
                        len = *lenp;
                if (copy_to_user(buffer, buf, len))
                        return -EFAULT;
        }
        *lenp = len;
        *ppos += len;
        return 0;
}

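/*
 * sysctl handler for vm.cmm_timeout: a write expects "<pages> <seconds>",
 * a read reports the current timeout settings.
 */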
static int
cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
{
        char buf[64], *p;
        long pages, seconds;
        int len;

        if (!*lenp || (*ppos && !write)) {
                *lenp = 0;
                return 0;
        }

        if (write) {
                len = *lenp;
                if (copy_from_user(buf, buffer,
                                   len > sizeof(buf) ? sizeof(buf) : len))
                        return -EFAULT;
                buf[sizeof(buf) - 1] = '\0';
                cmm_skip_blanks(buf, &p);
                pages = simple_strtoul(p, &p, 0);
                cmm_skip_blanks(p, &p);
                seconds = simple_strtoul(p, &p, 0);
                cmm_set_timeout(pages, seconds);
        } else {
                len = sprintf(buf, "%ld %ld\n",
                              cmm_timeout_pages, cmm_timeout_seconds);
                if (len > *lenp)
                        len = *lenp;
                if (copy_to_user(buffer, buf, len))
                        return -EFAULT;
        }
        *lenp = len;
        *ppos += len;
        return 0;
}

static struct ctl_table cmm_table[] = {
        {
                .ctl_name       = VM_CMM_PAGES,
                .procname       = "cmm_pages",
                .mode           = 0644,
                .proc_handler   = &cmm_pages_handler,
        },
        {
                .ctl_name       = VM_CMM_TIMED_PAGES,
                .procname       = "cmm_timed_pages",
                .mode           = 0644,
                .proc_handler   = &cmm_pages_handler,
        },
        {
                .ctl_name       = VM_CMM_TIMEOUT,
                .procname       = "cmm_timeout",
                .mode           = 0644,
                .proc_handler   = &cmm_timeout_handler,
        },
        { .ctl_name = 0 }
};

static struct ctl_table cmm_dir_table[] = {
        {
                .ctl_name       = CTL_VM,
                .procname       = "vm",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = cmm_table,
        },
        { .ctl_name = 0 }
};
#endif

#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
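/*
 * Handle "CMM SHRINK <pages>", "CMM RELEASE <pages>" and
 * "CMM REUSE <pages> <seconds>" messages from the guest named by the
 * "sender" module parameter.
 */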
static void
cmm_smsg_target(char *from, char *msg)
{
        long pages, seconds;

        if (strlen(sender) > 0 && strcmp(from, sender) != 0)
                return;
        if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
                return;
        if (strncmp(msg, "SHRINK", 6) == 0) {
                if (!cmm_skip_blanks(msg + 6, &msg))
                        return;
                pages = simple_strtoul(msg, &msg, 0);
                cmm_skip_blanks(msg, &msg);
                if (*msg == '\0')
                        cmm_set_pages(pages);
        } else if (strncmp(msg, "RELEASE", 7) == 0) {
                if (!cmm_skip_blanks(msg + 7, &msg))
                        return;
                pages = simple_strtoul(msg, &msg, 0);
                cmm_skip_blanks(msg, &msg);
                if (*msg == '\0')
                        cmm_add_timed_pages(pages);
        } else if (strncmp(msg, "REUSE", 5) == 0) {
                if (!cmm_skip_blanks(msg + 5, &msg))
                        return;
                pages = simple_strtoul(msg, &msg, 0);
                if (!cmm_skip_blanks(msg, &msg))
                        return;
                seconds = simple_strtoul(msg, &msg, 0);
                cmm_skip_blanks(msg, &msg);
                if (*msg == '\0')
                        cmm_set_timeout(pages, seconds);
        }
}
#endif

struct ctl_table_header *cmm_sysctl_header;

static int
cmm_init (void)
{
#ifdef CONFIG_CMM_PROC
        cmm_sysctl_header = register_sysctl_table(cmm_dir_table, 1);
#endif
#ifdef CONFIG_CMM_IUCV
        smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
        INIT_WORK(&cmm_thread_starter, (void *) cmm_start_thread, NULL);
        init_waitqueue_head(&cmm_thread_wait);
        init_timer(&cmm_timer);
        return 0;
}

static void
cmm_exit(void)
{
        cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
        cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
#ifdef CONFIG_CMM_PROC
        unregister_sysctl_table(cmm_sysctl_header);
#endif
#ifdef CONFIG_CMM_IUCV
        smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
}

module_init(cmm_init);
module_exit(cmm_exit);

EXPORT_SYMBOL(cmm_set_pages);
EXPORT_SYMBOL(cmm_get_pages);
EXPORT_SYMBOL(cmm_add_timed_pages);
EXPORT_SYMBOL(cmm_get_timed_pages);
EXPORT_SYMBOL(cmm_set_timeout);

MODULE_LICENSE("GPL");