blob: ca4a48d0d31144f341229e9352a88d85f3b7964d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001
2#include <linux/mm.h>
3#include <linux/file.h>
Bryan Wueb280622008-05-04 23:12:55 +08004#include <linux/fdtable.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07005#include <linux/mount.h>
Kees Cook5096add2007-05-08 00:26:04 -07006#include <linux/ptrace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007#include <linux/seq_file.h>
8#include "internal.h"
9
10/*
11 * Logic: we've got two memory sums for each process, "shared", and
Frederik Schwarzer025dfda2008-10-16 19:02:37 +020012 * "non-shared". Shared memory may get counted more than once, for
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 * each process that owns it. Non-shared memory is counted
14 * accurately.
15 */
/*
 * Display the memory totals for a no-MMU mm through a seq_file.
 *
 * "sbytes" (shared) collects allocations that may be counted against more
 * than one process: VMAs belonging to a shared mm (mm_count > 1), VMAs
 * with a backing region, or VMAs marked VM_MAYSHARE.  "bytes" is memory
 * owned solely by this process, and "slack" is allocator overhead — the
 * difference between what kobjsize() reports was really allocated for a
 * private mapping's storage and the length the VMA nominally spans.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0;

	down_read(&mm->mmap_sem);
	/* walk every VMA in the mm's rb-tree */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		/* the VMA descriptor itself always belongs to this process */
		bytes += kobjsize(vma);
		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_region ||
		    vma->vm_flags & VM_MAYSHARE) {
			/* on no-MMU the mapping's contents are presumably a
			 * kernel allocation based at vm_start, so kobjsize()
			 * on vm_start yields the size of that allocation */
			sbytes += kobjsize((void *) vma->vm_start);
			if (vma->vm_region)
				sbytes += kobjsize(vma->vm_region);
		} else {
			bytes += kobjsize((void *) vma->vm_start);
			slack += kobjsize((void *) vma->vm_start) -
				(vma->vm_end - vma->vm_start);
		}
	}

	/* the mm_struct, fs, files and sighand structures may each be shared
	 * with other tasks; attribute each one accordingly */
	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && atomic_read(&current->fs->count) > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}
70
71unsigned long task_vsize(struct mm_struct *mm)
72{
David Howells8feae132009-01-08 12:04:47 +000073 struct vm_area_struct *vma;
74 struct rb_node *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -070075 unsigned long vsize = 0;
76
77 down_read(&mm->mmap_sem);
David Howells8feae132009-01-08 12:04:47 +000078 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
79 vma = rb_entry(p, struct vm_area_struct, vm_rb);
80 vsize += vma->vm_region->vm_end - vma->vm_region->vm_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -070081 }
82 up_read(&mm->mmap_sem);
83 return vsize;
84}
85
86int task_statm(struct mm_struct *mm, int *shared, int *text,
87 int *data, int *resident)
88{
David Howells8feae132009-01-08 12:04:47 +000089 struct vm_area_struct *vma;
90 struct rb_node *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -070091 int size = kobjsize(mm);
92
93 down_read(&mm->mmap_sem);
David Howells8feae132009-01-08 12:04:47 +000094 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
95 vma = rb_entry(p, struct vm_area_struct, vm_rb);
96 size += kobjsize(vma);
97 size += kobjsize((void *) vma->vm_start);
Linus Torvalds1da177e2005-04-16 15:20:36 -070098 }
99
100 size += (*text = mm->end_code - mm->start_code);
101 size += (*data = mm->start_stack - mm->start_data);
102 up_read(&mm->mmap_sem);
103 *resident = size;
104 return size;
105}
106
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107/*
David Howells8feae132009-01-08 12:04:47 +0000108 * display a single VMA to a sequenced file
109 */
/*
 * display a single VMA to a sequenced file
 *
 * Emits one /proc/<pid>/maps-style line: address range, rwx/shared bits,
 * file offset, device, inode, and — for file-backed mappings — the path
 * padded out to a fixed column.
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	/* NOTE(review): 'flags' narrows vm_flags (unsigned long) to int;
	 * harmless for the low permission bits tested below, but verify no
	 * high bit is ever needed here */
	int flags, len;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	/* %n stores the number of characters printed so far into 'len';
	 * it is used below to pad the pathname to a fixed column */
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   vma->vm_pgoff << PAGE_SHIFT,
		   MAJOR(dev), MINOR(dev), ino, &len);

	if (file) {
		/* pad to the pathname column (width scales with pointer
		 * size so 32- and 64-bit output line up the same way) */
		len = 25 + sizeof(void *) * 6 - len;
		if (len < 1)
			len = 1;
		seq_printf(m, "%*c", len, ' ');
		seq_path(m, &file->f_path, "");
	}

	seq_putc(m, '\n');
	return 0;
}
148
149/*
David Howellsdbf86852006-09-27 01:50:19 -0700150 * display mapping lines for a particular process's /proc/pid/maps
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151 */
David Howells8feae132009-01-08 12:04:47 +0000152static int show_map(struct seq_file *m, void *_p)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153{
David Howells8feae132009-01-08 12:04:47 +0000154 struct rb_node *p = _p;
Kees Cook5096add2007-05-08 00:26:04 -0700155
David Howells8feae132009-01-08 12:04:47 +0000156 return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157}
David Howellsdbf86852006-09-27 01:50:19 -0700158
/*
 * seq_file start op: pin the task and its mm, then return the rb-tree
 * node of the (*pos)'th VMA, or NULL when iteration is done.
 */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	/* NOTE(review): judging by the matching up_read() in m_stop(),
	 * mm_for_maps() presumably returns with the mm pinned and mmap_sem
	 * held for reading — confirm against mm_for_maps()'s definition */
	mm = mm_for_maps(priv->task);
	if (!mm) {
		/* access denied or no mm: drop the task pin again */
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;
	return NULL;
}
David Howellsdbf86852006-09-27 01:50:19 -0700184
/*
 * seq_file stop op: undo m_start() — drop mmap_sem, the mm pin, and the
 * task pin.  priv->task == NULL means m_start() bailed before taking them.
 *
 * NOTE(review): this releases priv->task->mm rather than the mm that
 * mm_for_maps() actually returned in m_start(); if the task exec'd or
 * exited in between, the two could differ — verify this pairing.
 */
static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (priv->task) {
		struct mm_struct *mm = priv->task->mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
		put_task_struct(priv->task);
	}
}
David Howellsdbf86852006-09-27 01:50:19 -0700196
David Howells8feae132009-01-08 12:04:47 +0000197static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198{
David Howells8feae132009-01-08 12:04:47 +0000199 struct rb_node *p = _p;
David Howellsdbf86852006-09-27 01:50:19 -0700200
201 (*pos)++;
David Howells8feae132009-01-08 12:04:47 +0000202 return p ? rb_next(p) : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203}
David Howellsdbf86852006-09-27 01:50:19 -0700204
/* seq_file iterator over a process's VMAs, backing /proc/<pid>/maps */
static const struct seq_operations proc_pid_maps_ops = {
	.start = m_start,
	.next = m_next,
	.stop = m_stop,
	.show = show_map
};
Eric W. Biederman662795d2006-06-26 00:25:48 -0700211
212static int maps_open(struct inode *inode, struct file *file)
213{
David Howellsdbf86852006-09-27 01:50:19 -0700214 struct proc_maps_private *priv;
215 int ret = -ENOMEM;
216
217 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
218 if (priv) {
219 priv->pid = proc_pid(inode);
220 ret = seq_open(file, &proc_pid_maps_ops);
221 if (!ret) {
222 struct seq_file *m = file->private_data;
223 m->private = priv;
224 } else {
225 kfree(priv);
226 }
Eric W. Biederman662795d2006-06-26 00:25:48 -0700227 }
228 return ret;
229}
230
/* file_operations for /proc/<pid>/maps; seq_release_private also frees
 * the proc_maps_private allocated in maps_open() */
const struct file_operations proc_maps_operations = {
	.open = maps_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
237