Initial Contribution

msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142

Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
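---
Illustrative usage (not part of the patch): the ashmem hunks below add an
ASHMEM_CACHE_FLUSH_RANGE ioctl and export get_ashmem_file() and
put_ashmem_file() so other kernel code can translate a userspace ashmem fd
into its backing shmem file. A minimal sketch of how a hypothetical client
driver might use the exported helpers follows; the function name and error
handling are made up for illustration, and the prototypes are assumed to be
declared in include/linux/ashmem.h, which this excerpt does not show.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ashmem.h>

static int example_import_ashmem_fd(int fd)
{
	struct file *ashmem_filp;	/* reference-counted ashmem file */
	struct file *vm_file;		/* shmem file that owns the pages */
	unsigned long len;

	/* Takes a reference on the ashmem file when it succeeds. */
	if (get_ashmem_file(fd, &ashmem_filp, &vm_file, &len))
		return -EINVAL;		/* fd is not an ashmem region */

	/* ... use vm_file and len, e.g. map the region into a device ... */

	put_ashmem_file(ashmem_filp);	/* drop the reference taken above */
	return 0;
}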
diff --git a/mm/ashmem.c b/mm/ashmem.c
index 66e3f23..c7e72bb 100644
--- a/mm/ashmem.c
+++ b/mm/ashmem.c
@@ -29,6 +29,7 @@
 #include <linux/mutex.h>
 #include <linux/shmem_fs.h>
 #include <linux/ashmem.h>
+#include <asm/cacheflush.h>
 
 #define ASHMEM_NAME_PREFIX "dev/ashmem/"
 #define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
@@ -45,6 +46,8 @@
 	struct list_head unpinned_list;	/* list of all ashmem areas */
 	struct file *file;		/* the shmem-based backing file */
 	size_t size;			/* size of the mapping, in bytes */
+	unsigned long vm_start;		/* Start address of vm_area
+					 * which maps this ashmem */
 	unsigned long prot_mask;	/* allowed prot bits, as vm_flags */
 };
 
@@ -326,6 +329,7 @@
 		vma->vm_file = asma->file;
 	}
 	vma->vm_flags |= VM_CAN_NONLINEAR;
+	asma->vm_start = vma->vm_start;
 
 out:
 	mutex_unlock(&ashmem_mutex);
@@ -626,6 +630,84 @@
 	return ret;
 }
 
+#ifdef CONFIG_OUTER_CACHE
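+/*
+ * Walk the current task's page tables to translate a userspace virtual
+ * address into a physical address for the outer-cache maintenance done
+ * below.  Returns 0 when the address is not mapped.
+ */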
+static unsigned int kgsl_virtaddr_to_physaddr(unsigned int virtaddr)
+{
+	unsigned int physaddr = 0;
+	pgd_t *pgd_ptr = NULL;
+	pmd_t *pmd_ptr = NULL;
+	pte_t *pte_ptr = NULL, pte;
+
+	pgd_ptr = pgd_offset(current->mm, virtaddr);
+	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
+		pr_info
+		    ("Invalid pgd entry found while trying to convert virtual "
+		     "address to physical\n");
+		return 0;
+	}
+
+	pmd_ptr = pmd_offset(pgd_ptr, virtaddr);
+	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
+		pr_info
+		    ("Invalid pmd entry found while trying to convert virtual "
+		     "address to physical\n");
+		return 0;
+	}
+
+	pte_ptr = pte_offset_map(pmd_ptr, virtaddr);
+	if (!pte_ptr) {
+		pr_info
+		    ("Unable to map pte entry while trying to convert virtual "
+		     "address to physical\n");
+		return 0;
+	}
+	pte = *pte_ptr;
+	physaddr = pte_pfn(pte);
+	pte_unmap(pte_ptr);
+	physaddr <<= PAGE_SHIFT;
+	return physaddr;
+}
+#endif
+
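+/*
+ * Clean and invalidate the CPU caches (and, with CONFIG_OUTER_CACHE, the
+ * outer cache) over the whole region mapped by this ashmem area.  The
+ * area must have been mmap()ed so that asma->vm_start is valid, and both
+ * the start address and the size must be page aligned.
+ */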
+static int ashmem_flush_cache_range(struct ashmem_area *asma)
+{
+#ifdef CONFIG_OUTER_CACHE
+	unsigned long end;
+#endif
+	unsigned long addr;
+	unsigned int size;
+	int result = 0;
+
+	mutex_lock(&ashmem_mutex);
+
+	size = asma->size;
+	addr = asma->vm_start;
+	if (!addr || (addr & (PAGE_SIZE - 1)) || !size ||
+		(size & (PAGE_SIZE - 1))) {
+		result = -EINVAL;
+		goto done;
+	}
+
+#ifdef CONFIG_OUTER_CACHE
+	flush_cache_user_range(addr, addr + size);
+	for (end = addr; end < (addr + size); end += PAGE_SIZE) {
+		unsigned long physaddr;
+		physaddr = kgsl_virtaddr_to_physaddr(end);
+		if (!physaddr) {
+			result = -EINVAL;
+			goto done;
+		}
+
+		outer_flush_range(physaddr, physaddr + PAGE_SIZE);
+	}
+	mb();
+#else
+	clean_and_invalidate_caches(addr, size, 0);
+#endif
+done:
+	mutex_unlock(&ashmem_mutex);
+	return result;
+}
+
 static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct ashmem_area *asma = file->private_data;
@@ -671,11 +753,67 @@
 			ashmem_shrink(&ashmem_shrinker, &sc);
 		}
 		break;
+	case ASHMEM_CACHE_FLUSH_RANGE:
+		ret = ashmem_flush_cache_range(asma);
+		break;
 	}
 
 	return ret;
 }
 
+static int is_ashmem_file(struct file *file)
+{
+	char fname[256], *name;
+
+	name = dentry_path(file->f_dentry, fname, sizeof(fname));
+	if (IS_ERR(name))
+		return 0;
+	return strcmp(name, "/ashmem") ? 0 : 1;
+}
+
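+/*
+ * Resolve an ashmem file descriptor to its struct file.  On success the
+ * caller holds a reference on the ashmem file (*filp), which must be
+ * dropped with put_ashmem_file(); *vm_file is the shmem backing file and
+ * *len the size of the region.  Returns 0 on success, -1 otherwise.
+ */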
+int get_ashmem_file(int fd, struct file **filp, struct file **vm_file,
+			unsigned long *len)
+{
+	int ret = -1;
+	struct file *file = fget(fd);
+	*filp = NULL;
+	*vm_file = NULL;
+	if (unlikely(file == NULL)) {
+		pr_err("ashmem: %s: requested data from file "
+			"descriptor that doesn't exist.\n", __func__);
+	} else {
+		char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+		pr_debug("filp %p rdev %d pid %u(%s) file %p(%ld)"
+			" dev id: %d\n", filp,
+			file->f_dentry->d_inode->i_rdev,
+			current->pid, get_task_comm(currtask_name, current),
+			file, file_count(file),
+			MINOR(file->f_dentry->d_inode->i_rdev));
+		if (is_ashmem_file(file)) {
+			struct ashmem_area *asma = file->private_data;
+			*filp = file;
+			*vm_file = asma->file;
+			*len = asma->size;
+			ret = 0;
+		} else {
+			pr_err("file descriptor is not an ashmem "
+				"region fd: %d\n", fd);
+			fput(file);
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(get_ashmem_file);
+
+void put_ashmem_file(struct file *file)
+{
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+
+	if (!file)
+		return;
+	pr_debug("rdev %d pid %u(%s) file %p(%ld) dev id: %d\n",
+		file->f_dentry->d_inode->i_rdev, current->pid,
+		get_task_comm(currtask_name, current), file,
+		file_count(file), MINOR(file->f_dentry->d_inode->i_rdev));
+	if (is_ashmem_file(file))
+		fput(file);
+}
+EXPORT_SYMBOL(put_ashmem_file);
+
 static struct file_operations ashmem_fops = {
 	.owner = THIS_MODULE,
 	.open = ashmem_open,
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c46887b..b4e4296 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -112,9 +112,10 @@
 
 static void register_page_bootmem_info_section(unsigned long start_pfn)
 {
-	unsigned long *usemap, mapsize, section_nr, i;
+	unsigned long *usemap, mapsize, page_mapsize, section_nr, i, j;
 	struct mem_section *ms;
-	struct page *page, *memmap;
+	struct page *page, *memmap, *page_page;
+	int memmap_page_valid;
 
 	if (!pfn_valid(start_pfn))
 		return;
@@ -133,9 +134,21 @@
 	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
 	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
 
-	/* remember memmap's page */
-	for (i = 0; i < mapsize; i++, page++)
-		get_page_bootmem(section_nr, page, SECTION_INFO);
+	page_mapsize = PAGE_SIZE / sizeof(struct page);
+
+	/* remember memmap's page, except those that reference only holes */
+	for (i = 0; i < mapsize; i++, page++) {
+		memmap_page_valid = 0;
+		page_page = __va(page_to_pfn(page) << PAGE_SHIFT);
+		for (j = 0; j < page_mapsize; j++, page_page++) {
+			if (early_pfn_valid(page_to_pfn(page_page))) {
+				memmap_page_valid = 1;
+				break;
+			}
+		}
+		if (memmap_page_valid)
+			get_page_bootmem(section_nr, page, SECTION_INFO);
+	}
 
 	usemap = __nr_to_section(section_nr)->pageblock_flags;
 	page = virt_to_page(usemap);
@@ -596,6 +609,51 @@
 }
 EXPORT_SYMBOL_GPL(add_memory);
 
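+/*
+ * Remove a physical memory range: let the architecture code take the
+ * range out of use, then release the matching "System RAM" resource.
+ */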
+int __ref physical_remove_memory(u64 start, u64 size)
+{
+	int ret;
+	struct resource *res, *res_old;
+	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+	BUG_ON(!res);
+
+	ret = arch_physical_remove_memory(start, size);
+	if (ret) {
+		kfree(res);
+		return ret;
+	}
+
+	res->name = "System RAM";
+	res->start = start;
+	res->end = start + size - 1;
+	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+	res_old = locate_resource(&iomem_resource, res);
+	if (res_old)
+		release_memory_resource(res_old);
+	kfree(res);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(physical_remove_memory);
+
+int __ref physical_active_memory(u64 start, u64 size)
+{
+	int ret;
+
+	ret = arch_physical_active_memory(start, size);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(physical_active_memory);
+
+int __ref physical_low_power_memory(u64 start, u64 size)
+{
+	int ret;
+
+	ret = arch_physical_low_power_memory(start, size);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(physical_low_power_memory);
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
  * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
@@ -934,6 +992,23 @@
 	end_pfn = start_pfn + PFN_DOWN(size);
 	return offline_pages(start_pfn, end_pfn, 120 * HZ);
 }
+
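+/*
+ * Remove a range of (already isolated) pages from the buddy allocator,
+ * or hand them back, rounding the range up to whole pageblocks.
+ */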
+void reserve_hotplug_pages(unsigned long start_pfn, unsigned long nr_pages)
+{
+	nr_pages = ((nr_pages + pageblock_nr_pages - 1) >> pageblock_order)
+		<< pageblock_order;
+	offline_isolated_pages(start_pfn, start_pfn + nr_pages);
+}
+
+void unreserve_hotplug_pages(unsigned long start_pfn, unsigned long nr_pages)
+{
+	unsigned long onlined_pages = 0;
+
+	nr_pages = ((nr_pages + pageblock_nr_pages - 1) >> pageblock_order)
+		<< pageblock_order;
+	online_pages_range(start_pfn, nr_pages, &onlined_pages);
+}
+
 #else
 int remove_memory(u64 start, u64 size)
 {
diff --git a/mm/sparse.c b/mm/sparse.c
index aa64b12..8193ed8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -121,8 +121,10 @@
 int __section_nr(struct mem_section* ms)
 {
 	unsigned long root_nr;
-	struct mem_section* root;
+	struct mem_section *root;
 
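+	/*
+	 * When NR_MEM_SECTIONS is smaller than SECTIONS_PER_ROOT,
+	 * NR_SECTION_ROOTS evaluates to 0 and the loop below would never
+	 * set 'root'; compute the section number directly in that case.
+	 */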
+	if (NR_SECTION_ROOTS == 0)
+		return ms - __nr_to_section(0);
 	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
 		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
 		if (!root)