#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/huge_mm.h>

/*
 * page_is_file_cache - is the page part of the file cache?
 * @page: the page to test
 *
 * A page is file cache exactly when it is not swap backed.
 * Returns 1 for file-cache pages, 0 otherwise.
 */
static inline int page_is_file_cache(struct page *page)
{
	if (PageSwapBacked(page))
		return 0;
	return 1;
}

11static inline void
12add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
13{
14 struct lruvec *lruvec;
15
16 lruvec = mem_cgroup_lru_add_list(zone, page, lru);
17 list_add(&page->lru, &lruvec->lists[lru]);
18 __mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
19}
20
21static inline void
22del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
23{
24 mem_cgroup_lru_del_list(page, lru);
25 list_del(&page->lru);
26 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
27}
28
29static inline enum lru_list page_lru_base_type(struct page *page)
30{
31 if (page_is_file_cache(page))
32 return LRU_INACTIVE_FILE;
33 return LRU_INACTIVE_ANON;
34}
35
36static inline enum lru_list page_off_lru(struct page *page)
37{
38 enum lru_list lru;
39
40 if (PageUnevictable(page)) {
41 __ClearPageUnevictable(page);
42 lru = LRU_UNEVICTABLE;
43 } else {
44 lru = page_lru_base_type(page);
45 if (PageActive(page)) {
46 __ClearPageActive(page);
47 lru += LRU_ACTIVE;
48 }
49 }
50 return lru;
51}
52
53static inline enum lru_list page_lru(struct page *page)
54{
55 enum lru_list lru;
56
57 if (PageUnevictable(page))
58 lru = LRU_UNEVICTABLE;
59 else {
60 lru = page_lru_base_type(page);
61 if (PageActive(page))
62 lru += LRU_ACTIVE;
63 }
64 return lru;
65}

#endif