author     Oliver Schinagl <oliver@schinagl.nl>  2011-03-01 13:19:47 (GMT)
committer  Oliver Schinagl <oliver@schinagl.nl>  2011-03-01 13:19:47 (GMT)
commit     c5ed56c50061cfaadb6dfa80f0107a605ae1f630 (patch)
tree       704338dff4fe9575cafb9a86bfe335fd1dbf933c /uClinux-2.4.20-uc1/mmnommu
parent     a00bc04c717debbeeaa3010605ac0318f8e4c98d (diff)
download   openipcam-c5ed56c50061cfaadb6dfa80f0107a605ae1f630.zip
           openipcam-c5ed56c50061cfaadb6dfa80f0107a605ae1f630.tar.gz
           openipcam-c5ed56c50061cfaadb6dfa80f0107a605ae1f630.tar.bz2
Bare uClinux-2.4.20-uc1 CVS pull
Diffstat (limited to 'uClinux-2.4.20-uc1/mmnommu')
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/Makefile            24
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/bootmem.c          370
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/filemap.c         3247
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/memory.c           181
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/mlock.c             35
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/mmap.c            1574
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/mprotect.c          18
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/mremap.c            26
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/numa.c             130
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/oom_kill.c         254
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/page_alloc.c       892
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/page_alloc2.c     1015
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/page_alloc2.hack    64
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/slab.c            2180
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/swap.c             113
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/swap_state.c       266
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/swapfile.c        1316
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/vmalloc.c           51
-rw-r--r--  uClinux-2.4.20-uc1/mmnommu/vmscan.c           799
19 files changed, 12555 insertions, 0 deletions
diff --git a/uClinux-2.4.20-uc1/mmnommu/Makefile b/uClinux-2.4.20-uc1/mmnommu/Makefile
new file mode 100644
index 0000000..2764f61
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/Makefile
@@ -0,0 +1,24 @@
+#
+# Makefile for the linux memory manager.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+O_TARGET := mmnommu.o
+obj-y := bootmem.o filemap.o mlock.o mmap.o \
+ mprotect.o mremap.o numa.o \
+ slab.o swapfile.o vmalloc.o vmscan.o memory.o \
+ swap.o oom_kill.o
+
+ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
+obj-y += page_alloc2.o
+else
+obj-y += page_alloc.o
+endif
+
+export-objs := filemap.o memory.o page_alloc.o page_alloc2.o
+
+include $(TOPDIR)/Rules.make
diff --git a/uClinux-2.4.20-uc1/mmnommu/bootmem.c b/uClinux-2.4.20-uc1/mmnommu/bootmem.c
new file mode 100644
index 0000000..283e48b
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/bootmem.c
@@ -0,0 +1,370 @@
+/*
+ * linux/mm/bootmem.c
+ *
+ * Copyright (C) 1999 Ingo Molnar
+ * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
+ *
+ * simple boot-time physical memory area allocator and
+ * free memory collector. It's used to deal with reserved
+ * system memory and memory holes as well.
+ */
+
+#include <linux/mm.h>
+#include <linux/kernel_stat.h>
+#include <linux/swap.h>
+#include <linux/swapctl.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mmzone.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#ifdef NO_MM
+#include <asm/virtconvert.h>
+#endif
+
+/*
+ * Access to this subsystem has to be serialized externally. (this is
+ * true for the boot process anyway)
+ */
+unsigned long max_low_pfn;
+unsigned long min_low_pfn;
+unsigned long max_pfn;
+
+/* return the number of _pages_ that will be allocated for the boot bitmap */
+unsigned long __init bootmem_bootmap_pages (unsigned long pages)
+{
+ unsigned long mapsize;
+
+ mapsize = (pages+7)/8;
+ mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
+ mapsize >>= PAGE_SHIFT;
+
+ return mapsize;
+}
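/*
 * Worked example (illustrative only, not part of the patch): with 4 KB
 * pages (PAGE_SHIFT == 12) a board with 32 MB of RAM has 8192 page frames,
 * so the calculation above gives
 *
 *	mapsize = (8192 + 7) / 8	->  1024 bytes of bitmap
 *	rounded up to a page boundary	->  4096 bytes
 *	4096 >> PAGE_SHIFT		->  1 page reserved for the bitmap
 *
 * i.e. one bit per page frame, rounded up to whole pages.
 */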
+
+/*
+ * Called once to set up the allocator itself.
+ */
+static unsigned long __init init_bootmem_core (pg_data_t *pgdat,
+ unsigned long mapstart, unsigned long start, unsigned long end)
+{
+ bootmem_data_t *bdata = pgdat->bdata;
+ unsigned long mapsize = ((end - start)+7)/8;
+
+ pgdat->node_next = pgdat_list;
+ pgdat_list = pgdat;
+
+ mapsize = (mapsize + (sizeof(long) - 1UL)) & ~(sizeof(long) - 1UL);
+ bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
+ bdata->node_boot_start = (start << PAGE_SHIFT);
+ bdata->node_low_pfn = end;
+
+ /*
+ * Initially all pages are reserved - setup_arch() has to
+ * register free RAM areas explicitly.
+ */
+ memset(bdata->node_bootmem_map, 0xff, mapsize);
+
+ return mapsize;
+}
+
+/*
+ * Marks a particular physical memory range as unallocatable. Usable RAM
+ * might be used for boot-time allocations - or it might get added
+ * to the free page pool later on.
+ */
+static void __init reserve_bootmem_core(bootmem_data_t *bdata, unsigned long addr, unsigned long size)
+{
+ unsigned long i;
+
+ if (addr >= bdata->node_boot_start) {
+ /*
+ * round up, partially reserved pages are considered
+ * fully reserved.
+ */
+ unsigned long sidx = (addr - bdata->node_boot_start)/PAGE_SIZE;
+ unsigned long eidx = (addr + size - bdata->node_boot_start +
+ PAGE_SIZE-1)/PAGE_SIZE;
+ unsigned long end = (addr + size + PAGE_SIZE-1)/PAGE_SIZE;
+
+ if (!size) BUG();
+
+ if (sidx < 0)
+ BUG();
+ if (eidx < 0)
+ BUG();
+ if (sidx >= eidx)
+ BUG();
+ if ((addr >> PAGE_SHIFT) >= bdata->node_low_pfn)
+ BUG();
+ if (end > bdata->node_low_pfn)
+ BUG();
+ for (i = sidx; i < eidx; i++)
+ if (test_and_set_bit(i, bdata->node_bootmem_map))
+ printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
+ } else {
+ printk(KERN_WARNING
+ "reserve_bootmem_core: address %08lx below node_boot_start\n", addr);
+ }
+}
+
+static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, unsigned long size)
+{
+ unsigned long i;
+ unsigned long start;
+ /*
+ * round down end of usable mem, partially free pages are
+ * considered reserved.
+ */
+ unsigned long sidx;
+ unsigned long eidx = (addr + size - bdata->node_boot_start)/PAGE_SIZE;
+ unsigned long end = (addr + size)/PAGE_SIZE;
+
+ if (!size) BUG();
+ if (end > bdata->node_low_pfn)
+ BUG();
+
+ /*
+ * Round up the beginning of the address.
+ */
+ start = (addr + PAGE_SIZE-1) / PAGE_SIZE;
+ sidx = start - (bdata->node_boot_start/PAGE_SIZE);
+
+ for (i = sidx; i < eidx; i++) {
+ if (!test_and_clear_bit(i, bdata->node_bootmem_map))
+ BUG();
+ }
+}
+
+/*
+ * We 'merge' subsequent allocations to save space. We might 'lose'
+ * some fraction of a page if allocations cannot be satisfied due to
+ * size constraints on boxes where there is physical RAM space
+ * fragmentation - in these cases (mostly large memory boxes) this
+ * is not a problem.
+ *
+ * On low memory boxes we get it right in 100% of the cases.
+ */
+
+/*
+ * alignment has to be a power of 2 value.
+ */
+static void * __init __alloc_bootmem_core (bootmem_data_t *bdata,
+ unsigned long size, unsigned long align, unsigned long goal)
+{
+ unsigned long i, start = 0;
+ void *ret;
+ unsigned long offset, remaining_size;
+ unsigned long areasize, preferred, incr;
+ unsigned long eidx = bdata->node_low_pfn - (bdata->node_boot_start >>
+ PAGE_SHIFT);
+
+ if (!size) BUG();
+
+ if (align & (align-1))
+ BUG();
+
+ offset = 0;
+ if (align &&
+ (bdata->node_boot_start & (align - 1UL)) != 0)
+ offset = (align - (bdata->node_boot_start & (align - 1UL)));
+ offset >>= PAGE_SHIFT;
+
+ /*
+ * We try to allocate bootmem pages above 'goal'
+ * first, then we try to allocate lower pages.
+ */
+ if (goal && (goal >= bdata->node_boot_start) &&
+ ((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) {
+ preferred = goal - bdata->node_boot_start;
+ } else
+ preferred = 0;
+
+ preferred = ((preferred + align - 1) & ~(align - 1)) >> PAGE_SHIFT;
+ preferred += offset;
+ areasize = (size+PAGE_SIZE-1)/PAGE_SIZE;
+ incr = align >> PAGE_SHIFT ? : 1;
+
+restart_scan:
+ for (i = preferred; i < eidx; i += incr) {
+ unsigned long j;
+ if (test_bit(i, bdata->node_bootmem_map))
+ continue;
+ for (j = i + 1; j < i + areasize; ++j) {
+ if (j >= eidx)
+ goto fail_block;
+ if (test_bit (j, bdata->node_bootmem_map))
+ goto fail_block;
+ }
+ start = i;
+ goto found;
+ fail_block:;
+ }
+ if (preferred) {
+ preferred = offset;
+ goto restart_scan;
+ }
+ return NULL;
+found:
+ if (start >= eidx)
+ BUG();
+
+ /*
+ * Is the next page of the previous allocation-end the start
+ * of this allocation's buffer? If yes then we can 'merge'
+ * the previous partial page with this allocation.
+ */
+ if (align <= PAGE_SIZE
+ && bdata->last_offset && bdata->last_pos+1 == start) {
+ offset = (bdata->last_offset+align-1) & ~(align-1);
+ if (offset > PAGE_SIZE)
+ BUG();
+ remaining_size = PAGE_SIZE-offset;
+ if (size < remaining_size) {
+ areasize = 0;
+ // last_pos unchanged
+ bdata->last_offset = offset+size;
+ ret = phys_to_virt(bdata->last_pos*PAGE_SIZE + offset +
+ bdata->node_boot_start);
+ } else {
+ remaining_size = size - remaining_size;
+ areasize = (remaining_size+PAGE_SIZE-1)/PAGE_SIZE;
+ ret = phys_to_virt(bdata->last_pos*PAGE_SIZE + offset +
+ bdata->node_boot_start);
+ bdata->last_pos = start+areasize-1;
+ bdata->last_offset = remaining_size;
+ }
+ bdata->last_offset &= ~PAGE_MASK;
+ } else {
+ bdata->last_pos = start + areasize - 1;
+ bdata->last_offset = size & ~PAGE_MASK;
+ ret = phys_to_virt(start * PAGE_SIZE + bdata->node_boot_start);
+ }
+ /*
+ * Reserve the area now:
+ */
+ for (i = start; i < start+areasize; i++)
+ if (test_and_set_bit(i, bdata->node_bootmem_map))
+ BUG();
+ memset(ret, 0, size);
+ return ret;
+}
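/*
 * Illustrative example (not part of the patch) of the merge path above,
 * assuming 4 KB pages, 32-byte alignment and an otherwise empty node:
 *
 *	a = __alloc_bootmem(100, 32, 0);   reserves one page; afterwards
 *	                                   last_pos = that page, last_offset = 100
 *	b = __alloc_bootmem(200, 32, 0);   the scan picks the next free page,
 *	                                   the merge test sees last_pos+1 == start,
 *	                                   so b == a + 128 lands in the SAME page
 *	                                   and no additional page bit is set
 *
 * Only when the tail of the previous page is too small does the allocator
 * actually claim further whole pages.
 */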
+
+static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
+{
+ struct page *page = pgdat->node_mem_map;
+ bootmem_data_t *bdata = pgdat->bdata;
+ unsigned long i, count, total = 0;
+ unsigned long idx;
+
+ if (!bdata->node_bootmem_map) BUG();
+
+ count = 0;
+ idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
+ for (i = 0; i < idx; i++, page++) {
+ if (!test_bit(i, bdata->node_bootmem_map)) {
+ count++;
+ ClearPageReserved(page);
+ set_page_count(page, 1);
+ __free_page(page);
+ }
+ }
+ total += count;
+
+ /*
+ * Now free the allocator bitmap itself, it's not
+ * needed anymore:
+ */
+ page = virt_to_page(bdata->node_bootmem_map);
+ count = 0;
+#if !defined(__arm__)
+ for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
+ count++;
+ ClearPageReserved(page);
+ set_page_count(page, 1);
+ __free_page(page);
+ }
+#endif
+ total += count;
+ bdata->node_bootmem_map = NULL;
+
+ return total;
+}
+
+unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn)
+{
+ return(init_bootmem_core(pgdat, freepfn, startpfn, endpfn));
+}
+
+void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size)
+{
+ reserve_bootmem_core(pgdat->bdata, physaddr, size);
+}
+
+void __init free_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size)
+{
+ return(free_bootmem_core(pgdat->bdata, physaddr, size));
+}
+
+unsigned long __init free_all_bootmem_node (pg_data_t *pgdat)
+{
+ return(free_all_bootmem_core(pgdat));
+}
+
+unsigned long __init init_bootmem (unsigned long start, unsigned long pages)
+{
+ max_low_pfn = pages;
+ min_low_pfn = start;
+ return(init_bootmem_core(&contig_page_data, start, 0, pages));
+}
+
+void __init reserve_bootmem (unsigned long addr, unsigned long size)
+{
+ reserve_bootmem_core(contig_page_data.bdata, addr, size);
+}
+
+void __init free_bootmem (unsigned long addr, unsigned long size)
+{
+ return(free_bootmem_core(contig_page_data.bdata, addr, size));
+}
+
+unsigned long __init free_all_bootmem (void)
+{
+ return(free_all_bootmem_core(&contig_page_data));
+}
+
+void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
+{
+ pg_data_t *pgdat;
+ void *ptr;
+
+ for_each_pgdat(pgdat)
+ if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
+ align, goal)))
+ return(ptr);
+
+ /*
+ * Whoops, we cannot satisfy the allocation request.
+ */
+ printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+ panic("Out of memory");
+ return NULL;
+}
+
+void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
+{
+ void *ptr;
+
+ ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal);
+ if (ptr)
+ return (ptr);
+
+ /*
+ * Whoops, we cannot satisfy the allocation request.
+ */
+ printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+ panic("Out of memory");
+ return NULL;
+}
+
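For orientation, the interface above is normally driven from each architecture's early boot code. The condensed sketch below (illustrative only, not part of this commit; bitmap_start_pfn, end_pfn, ram_start_phys, ram_size_bytes, kernel_start_phys, kernel_size_bytes and buf are placeholders for values the port computes itself) shows the usual call sequence:

	/* setup_arch(): place the bitmap at 'bitmap_start_pfn' and cover
	 * page frames 0 .. end_pfn; every page starts out reserved. */
	bootmap_size = init_bootmem(bitmap_start_pfn, end_pfn);

	/* Hand the usable RAM back, then re-reserve what must survive:
	 * the kernel image and the bootmem bitmap itself. */
	free_bootmem(ram_start_phys, ram_size_bytes);
	reserve_bootmem(kernel_start_phys, kernel_size_bytes + bootmap_size);

	/* Early, permanent allocations come straight out of the bitmap
	 * and panic if they cannot be satisfied. */
	buf = __alloc_bootmem(8192, SMP_CACHE_BYTES, 0);

	/* mem_init(): retire the boot allocator; every page still marked
	 * free is handed to the page allocator via __free_page(). */
	totalram_pages += free_all_bootmem();

After free_all_bootmem() the bitmap pages themselves are also released (except on ARM, as the #if in free_all_bootmem_core shows) and all further allocations go through the normal page allocator.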
diff --git a/uClinux-2.4.20-uc1/mmnommu/filemap.c b/uClinux-2.4.20-uc1/mmnommu/filemap.c
new file mode 100644
index 0000000..7af9690
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/filemap.c
@@ -0,0 +1,3247 @@
+/*
+ * linux/mm/filemap.c
+ *
+ * Copyright (C) 1994-1999 Linus Torvalds
+ * Copyright (c) 2001 Lineo Inc., David McCullough <davidm@lineo.com>
+ * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> ref uClinux 2.0
+ */
+
+/*
+ * This file handles the generic file mmap semantics used by
+ * most "normal" filesystems (but you don't /have/ to use this:
+ * the NFS filesystem used to do this differently, for example)
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/shm.h>
+#include <linux/mman.h>
+#include <linux/locks.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/smp_lock.h>
+#include <linux/blkdev.h>
+#include <linux/file.h>
+#include <linux/swapctl.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/iobuf.h>
+
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+#include <asm/mman.h>
+#ifdef NO_MM
+#include <asm/virtconvert.h>
+#endif
+
+#include <linux/highmem.h>
+
+/*
+ * Shared mappings implemented 30.11.1994. It's not fully working yet,
+ * though.
+ *
+ * Shared mappings now work. 15.8.1995 Bruno.
+ *
+ * finished 'unifying' the page and buffer cache and SMP-threaded the
+ * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
+ *
+ * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
+ */
+
+atomic_t page_cache_size = ATOMIC_INIT(0);
+unsigned int page_hash_bits;
+struct page **page_hash_table;
+
+int vm_max_readahead = 31;
+int vm_min_readahead = 3;
+EXPORT_SYMBOL(vm_max_readahead);
+EXPORT_SYMBOL(vm_min_readahead);
+
+
+spinlock_cacheline_t pagecache_lock_cacheline = {SPIN_LOCK_UNLOCKED};
+/*
+ * NOTE: to avoid deadlocking you must never acquire the pagemap_lru_lock
+ * with the pagecache_lock held.
+ *
+ * Ordering:
+ * swap_lock ->
+ * pagemap_lru_lock ->
+ * pagecache_lock
+ */
+spinlock_cacheline_t pagemap_lru_lock_cacheline = {SPIN_LOCK_UNLOCKED};
+
+#define CLUSTER_PAGES (1 << page_cluster)
+#define CLUSTER_OFFSET(x) (((x) >> page_cluster) << page_cluster)
+
+static void FASTCALL(add_page_to_hash_queue(struct page * page, struct page **p));
+static void add_page_to_hash_queue(struct page * page, struct page **p)
+{
+ struct page *next = *p;
+
+ *p = page;
+ page->next_hash = next;
+ page->pprev_hash = p;
+ if (next)
+ next->pprev_hash = &page->next_hash;
+ if (page->buffers)
+ PAGE_BUG(page);
+ atomic_inc(&page_cache_size);
+}
+
+static inline void add_page_to_inode_queue(struct address_space *mapping, struct page * page)
+{
+ struct list_head *head = &mapping->clean_pages;
+
+ mapping->nrpages++;
+ list_add(&page->list, head);
+ page->mapping = mapping;
+}
+
+static inline void remove_page_from_inode_queue(struct page * page)
+{
+ struct address_space * mapping = page->mapping;
+
+ mapping->nrpages--;
+ list_del(&page->list);
+ page->mapping = NULL;
+}
+
+static inline void remove_page_from_hash_queue(struct page * page)
+{
+ struct page *next = page->next_hash;
+ struct page **pprev = page->pprev_hash;
+
+ if (next)
+ next->pprev_hash = pprev;
+ *pprev = next;
+ page->pprev_hash = NULL;
+ atomic_dec(&page_cache_size);
+}
+
+/*
+ * Remove a page from the page cache and free it. Caller has to make
+ * sure the page is locked and that nobody else uses it - or that usage
+ * is safe.
+ */
+void __remove_inode_page(struct page *page)
+{
+ if (PageDirty(page) && !PageSwapCache(page))
+ BUG();
+ remove_page_from_inode_queue(page);
+ remove_page_from_hash_queue(page);
+}
+
+void remove_inode_page(struct page *page)
+{
+ if (!PageLocked(page))
+ PAGE_BUG(page);
+
+ spin_lock(&pagecache_lock);
+ __remove_inode_page(page);
+ spin_unlock(&pagecache_lock);
+}
+
+static inline int sync_page(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+
+ if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
+ return mapping->a_ops->sync_page(page);
+ return 0;
+}
+
+/*
+ * Add a page to the dirty page list.
+ */
+void set_page_dirty(struct page *page)
+{
+ if (!test_and_set_bit(PG_dirty, &page->flags)) {
+ struct address_space *mapping = page->mapping;
+
+ if (mapping) {
+ spin_lock(&pagecache_lock);
+ mapping = page->mapping;
+ if (mapping) { /* may have been truncated */
+ list_del(&page->list);
+ list_add(&page->list, &mapping->dirty_pages);
+ }
+ spin_unlock(&pagecache_lock);
+
+ if (mapping && mapping->host)
+ mark_inode_dirty_pages(mapping->host);
+ }
+ }
+}
+
+/**
+ * invalidate_inode_pages - Invalidate all the unlocked pages of one inode
+ * @inode: the inode whose pages we want to invalidate
+ *
+ * This function only removes the unlocked pages, if you want to
+ * remove all the pages of one inode, you must call truncate_inode_pages.
+ */
+
+void invalidate_inode_pages(struct inode * inode)
+{
+ struct list_head *head, *curr;
+ struct page * page;
+
+ head = &inode->i_mapping->clean_pages;
+
+ spin_lock(&pagemap_lru_lock);
+ spin_lock(&pagecache_lock);
+ curr = head->next;
+
+ while (curr != head) {
+ page = list_entry(curr, struct page, list);
+ curr = curr->next;
+
+ /* We cannot invalidate something in dirty.. */
+ if (PageDirty(page))
+ continue;
+
+ /* ..or locked */
+ if (TryLockPage(page))
+ continue;
+
+ if (page->buffers && !try_to_free_buffers(page, 0))
+ goto unlock;
+
+ if (page_count(page) != 1)
+ goto unlock;
+
+ __lru_cache_del(page);
+ __remove_inode_page(page);
+ UnlockPage(page);
+ page_cache_release(page);
+ continue;
+unlock:
+ UnlockPage(page);
+ continue;
+ }
+
+ spin_unlock(&pagecache_lock);
+ spin_unlock(&pagemap_lru_lock);
+}
+
+static int do_flushpage(struct page *page, unsigned long offset)
+{
+ int (*flushpage) (struct page *, unsigned long);
+ flushpage = page->mapping->a_ops->flushpage;
+ if (flushpage)
+ return (*flushpage)(page, offset);
+ return block_flushpage(page, offset);
+}
+
+static inline void truncate_partial_page(struct page *page, unsigned partial)
+{
+ memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
+ if (page->buffers)
+ do_flushpage(page, partial);
+}
+
+static void truncate_complete_page(struct page *page)
+{
+ /* Leave it on the LRU if it gets converted into anonymous buffers */
+ if (!page->buffers || do_flushpage(page, 0))
+ lru_cache_del(page);
+
+ /*
+ * We remove the page from the page cache _after_ we have
+ * destroyed all buffer-cache references to it. Otherwise some
+ * other process might think this inode page is not in the
+ * page cache and creates a buffer-cache alias to it causing
+ * all sorts of fun problems ...
+ */
+ ClearPageDirty(page);
+ ClearPageUptodate(page);
+ remove_inode_page(page);
+ page_cache_release(page);
+}
+
+static int FASTCALL(truncate_list_pages(struct list_head *, unsigned long, unsigned *));
+static int truncate_list_pages(struct list_head *head, unsigned long start, unsigned *partial)
+{
+ struct list_head *curr;
+ struct page * page;
+ int unlocked = 0;
+
+ restart:
+ curr = head->prev;
+ while (curr != head) {
+ unsigned long offset;
+
+ page = list_entry(curr, struct page, list);
+ offset = page->index;
+
+ /* Is this one of the pages to truncate? */
+ if ((offset >= start) || (*partial && (offset + 1) == start)) {
+ int failed;
+
+ page_cache_get(page);
+ failed = TryLockPage(page);
+
+ list_del(head);
+ if (!failed)
+ /* Restart after this page */
+ list_add_tail(head, curr);
+ else
+ /* Restart on this page */
+ list_add(head, curr);
+
+ spin_unlock(&pagecache_lock);
+ unlocked = 1;
+
+ if (!failed) {
+ if (*partial && (offset + 1) == start) {
+ truncate_partial_page(page, *partial);
+ *partial = 0;
+ } else
+ truncate_complete_page(page);
+
+ UnlockPage(page);
+ } else
+ wait_on_page(page);
+
+ page_cache_release(page);
+
+ if (current->need_resched) {
+ __set_current_state(TASK_RUNNING);
+ schedule();
+ }
+
+ spin_lock(&pagecache_lock);
+ goto restart;
+ }
+ curr = curr->prev;
+ }
+ return unlocked;
+}
+
+
+/**
+ * truncate_inode_pages - truncate *all* the pages from an offset
+ * @mapping: mapping to truncate
+ * @lstart: offset from which to truncate
+ *
+ * Truncate the page cache at a set offset, removing the pages
+ * that are beyond that offset (and zeroing out partial pages).
+ * If any page is locked we wait for it to become unlocked.
+ */
+void truncate_inode_pages(struct address_space * mapping, loff_t lstart)
+{
+ unsigned long start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
+ int unlocked;
+
+ spin_lock(&pagecache_lock);
+ do {
+ unlocked = truncate_list_pages(&mapping->clean_pages, start, &partial);
+ unlocked |= truncate_list_pages(&mapping->dirty_pages, start, &partial);
+ unlocked |= truncate_list_pages(&mapping->locked_pages, start, &partial);
+ } while (unlocked);
+ /* Traversed all three lists without dropping the lock */
+ spin_unlock(&pagecache_lock);
+}
+
+static inline int invalidate_this_page2(struct page * page,
+ struct list_head * curr,
+ struct list_head * head)
+{
+ int unlocked = 1;
+
+ /*
+ * The page is locked and we hold the pagecache_lock as well
+ * so both page_count(page) and page->buffers stays constant here.
+ */
+ if (page_count(page) == 1 + !!page->buffers) {
+ /* Restart after this page */
+ list_del(head);
+ list_add_tail(head, curr);
+
+ page_cache_get(page);
+ spin_unlock(&pagecache_lock);
+ truncate_complete_page(page);
+ } else {
+ if (page->buffers) {
+ /* Restart after this page */
+ list_del(head);
+ list_add_tail(head, curr);
+
+ page_cache_get(page);
+ spin_unlock(&pagecache_lock);
+ block_invalidate_page(page);
+ } else
+ unlocked = 0;
+
+ ClearPageDirty(page);
+ ClearPageUptodate(page);
+ }
+
+ return unlocked;
+}
+
+static int FASTCALL(invalidate_list_pages2(struct list_head *));
+static int invalidate_list_pages2(struct list_head *head)
+{
+ struct list_head *curr;
+ struct page * page;
+ int unlocked = 0;
+
+ restart:
+ curr = head->prev;
+ while (curr != head) {
+ page = list_entry(curr, struct page, list);
+
+ if (!TryLockPage(page)) {
+ int __unlocked;
+
+ __unlocked = invalidate_this_page2(page, curr, head);
+ UnlockPage(page);
+ unlocked |= __unlocked;
+ if (!__unlocked) {
+ curr = curr->prev;
+ continue;
+ }
+ } else {
+ /* Restart on this page */
+ list_del(head);
+ list_add(head, curr);
+
+ page_cache_get(page);
+ spin_unlock(&pagecache_lock);
+ unlocked = 1;
+ wait_on_page(page);
+ }
+
+ page_cache_release(page);
+ if (current->need_resched) {
+ __set_current_state(TASK_RUNNING);
+ schedule();
+ }
+
+ spin_lock(&pagecache_lock);
+ goto restart;
+ }
+ return unlocked;
+}
+
+/**
+ * invalidate_inode_pages2 - Clear all the dirty bits around if it can't
+ * free the pages because they're mapped.
+ * @mapping: the address_space whose pages we want to invalidate
+ */
+void invalidate_inode_pages2(struct address_space * mapping)
+{
+ int unlocked;
+
+ spin_lock(&pagecache_lock);
+ do {
+ unlocked = invalidate_list_pages2(&mapping->clean_pages);
+ unlocked |= invalidate_list_pages2(&mapping->dirty_pages);
+ unlocked |= invalidate_list_pages2(&mapping->locked_pages);
+ } while (unlocked);
+ spin_unlock(&pagecache_lock);
+}
+
+static inline struct page * __find_page_nolock(struct address_space *mapping, unsigned long offset, struct page *page)
+{
+ goto inside;
+
+ for (;;) {
+ page = page->next_hash;
+inside:
+ if (!page)
+ goto not_found;
+ if (page->mapping != mapping)
+ continue;
+ if (page->index == offset)
+ break;
+ }
+
+not_found:
+ return page;
+}
+
+static int do_buffer_fdatasync(struct list_head *head, unsigned long start, unsigned long end, int (*fn)(struct page *))
+{
+ struct list_head *curr;
+ struct page *page;
+ int retval = 0;
+
+ spin_lock(&pagecache_lock);
+ curr = head->next;
+ while (curr != head) {
+ page = list_entry(curr, struct page, list);
+ curr = curr->next;
+ if (!page->buffers)
+ continue;
+ if (page->index >= end)
+ continue;
+ if (page->index < start)
+ continue;
+
+ page_cache_get(page);
+ spin_unlock(&pagecache_lock);
+ lock_page(page);
+
+ /* The buffers could have been free'd while we waited for the page lock */
+ if (page->buffers)
+ retval |= fn(page);
+
+ UnlockPage(page);
+ spin_lock(&pagecache_lock);
+ curr = page->list.next;
+ page_cache_release(page);
+ }
+ spin_unlock(&pagecache_lock);
+
+ return retval;
+}
+
+/*
+ * Two-stage data sync: first start the IO, then go back and
+ * collect the information..
+ */
+int generic_buffer_fdatasync(struct inode *inode, unsigned long start_idx, unsigned long end_idx)
+{
+ int retval;
+
+ /* writeout dirty buffers on pages from both clean and dirty lists */
+ retval = do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, writeout_one_page);
+ retval |= do_buffer_fdatasync(&inode->i_mapping->clean_pages, start_idx, end_idx, writeout_one_page);
+ retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, writeout_one_page);
+
+ /* now wait for locked buffers on pages from both clean and dirty lists */
+ retval |= do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, waitfor_one_page);
+ retval |= do_buffer_fdatasync(&inode->i_mapping->clean_pages, start_idx, end_idx, waitfor_one_page);
+ retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, waitfor_one_page);
+
+ return retval;
+}
+
+/*
+ * In-memory filesystems have to fail their
+ * writepage function - and this has to be
+ * worked around in the VM layer..
+ *
+ * We
+ * - mark the page dirty again (but do NOT
+ * add it back to the inode dirty list, as
+ * that would livelock in fdatasync)
+ * - activate the page so that the page stealer
+ * doesn't try to write it out over and over
+ * again.
+ */
+int fail_writepage(struct page *page)
+{
+ /* Only activate on memory-pressure, not fsync.. */
+ if (PageLaunder(page)) {
+ activate_page(page);
+ SetPageReferenced(page);
+ }
+
+ /* Set the page dirty again, unlock */
+ SetPageDirty(page);
+ UnlockPage(page);
+ return 0;
+}
+
+EXPORT_SYMBOL(fail_writepage);
+
+/**
+ * filemap_fdatasync - walk the list of dirty pages of the given address space
+ * and writepage() all of them.
+ *
+ * @mapping: address space structure to write
+ *
+ */
+int filemap_fdatasync(struct address_space * mapping)
+{
+ int ret = 0;
+ int (*writepage)(struct page *) = mapping->a_ops->writepage;
+
+ spin_lock(&pagecache_lock);
+
+ while (!list_empty(&mapping->dirty_pages)) {
+ struct page *page = list_entry(mapping->dirty_pages.prev, struct page, list);
+
+ list_del(&page->list);
+ list_add(&page->list, &mapping->locked_pages);
+
+ if (!PageDirty(page))
+ continue;
+
+ page_cache_get(page);
+ spin_unlock(&pagecache_lock);
+
+ lock_page(page);
+
+ if (PageDirty(page)) {
+ int err;
+ ClearPageDirty(page);
+ err = writepage(page);
+ if (err && !ret)
+ ret = err;
+ } else
+ UnlockPage(page);
+
+ page_cache_release(page);
+ spin_lock(&pagecache_lock);
+ }
+ spin_unlock(&pagecache_lock);
+ return ret;
+}
+
+/**
+ * filemap_fdatawait - walk the list of locked pages of the given address space
+ * and wait for all of them.
+ *
+ * @mapping: address space structure to wait for
+ *
+ */
+int filemap_fdatawait(struct address_space * mapping)
+{
+ int ret = 0;
+
+ spin_lock(&pagecache_lock);
+
+ while (!list_empty(&mapping->locked_pages)) {
+ struct page *page = list_entry(mapping->locked_pages.next, struct page, list);
+
+ list_del(&page->list);
+ list_add(&page->list, &mapping->clean_pages);
+
+ if (!PageLocked(page))
+ continue;
+
+ page_cache_get(page);
+ spin_unlock(&pagecache_lock);
+
+ ___wait_on_page(page);
+ if (PageError(page))
+ ret = -EIO;
+
+ page_cache_release(page);
+ spin_lock(&pagecache_lock);
+ }
+ spin_unlock(&pagecache_lock);
+ return ret;
+}
+
+/*
+ * Add a page to the inode page cache.
+ *
+ * The caller must have locked the page and
+ * set all the page flags correctly..
+ */
+void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index)
+{
+ if (!PageLocked(page))
+ BUG();
+
+ page->index = index;
+ page_cache_get(page);
+ spin_lock(&pagecache_lock);
+ add_page_to_inode_queue(mapping, page);
+ add_page_to_hash_queue(page, page_hash(mapping, index));
+ spin_unlock(&pagecache_lock);
+
+ lru_cache_add(page);
+}
+
+/*
+ * This adds a page to the page cache, starting out as locked,
+ * owned by us, but unreferenced, not uptodate and with no errors.
+ */
+static inline void __add_to_page_cache(struct page * page,
+ struct address_space *mapping, unsigned long offset,
+ struct page **hash)
+{
+ unsigned long flags;
+
+ flags = page->flags & ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_dirty | 1 << PG_referenced | 1 << PG_arch_1 | 1 << PG_checked);
+ page->flags = flags | (1 << PG_locked);
+ page_cache_get(page);
+ page->index = offset;
+ add_page_to_inode_queue(mapping, page);
+ add_page_to_hash_queue(page, hash);
+}
+
+void add_to_page_cache(struct page * page, struct address_space * mapping, unsigned long offset)
+{
+ spin_lock(&pagecache_lock);
+ __add_to_page_cache(page, mapping, offset, page_hash(mapping, offset));
+ spin_unlock(&pagecache_lock);
+ lru_cache_add(page);
+}
+
+int add_to_page_cache_unique(struct page * page,
+ struct address_space *mapping, unsigned long offset,
+ struct page **hash)
+{
+ int err;
+ struct page *alias;
+
+ spin_lock(&pagecache_lock);
+ alias = __find_page_nolock(mapping, offset, *hash);
+
+ err = 1;
+ if (!alias) {
+ __add_to_page_cache(page,mapping,offset,hash);
+ err = 0;
+ }
+
+ spin_unlock(&pagecache_lock);
+ if (!err)
+ lru_cache_add(page);
+ return err;
+}
+
+/*
+ * This adds the requested page to the page cache if it isn't already there,
+ * and schedules an I/O to read in its contents from disk.
+ */
+static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
+static int page_cache_read(struct file * file, unsigned long offset)
+{
+ struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
+ struct page **hash = page_hash(mapping, offset);
+ struct page *page;
+
+ spin_lock(&pagecache_lock);
+ page = __find_page_nolock(mapping, offset, *hash);
+ spin_unlock(&pagecache_lock);
+ if (page)
+ return 0;
+
+ page = page_cache_alloc(mapping);
+ if (!page)
+ return -ENOMEM;
+
+ if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
+ int error = mapping->a_ops->readpage(file, page);
+ page_cache_release(page);
+ return error;
+ }
+ /*
+ * We arrive here in the unlikely event that someone
+ * raced with us and added our page to the cache first.
+ */
+ page_cache_release(page);
+ return 0;
+}
+
+#ifndef NO_MM
+/*
+ * Read in an entire cluster at once. A cluster is usually a 64k-
+ * aligned block that includes the page requested in "offset."
+ */
+static int FASTCALL(read_cluster_nonblocking(struct file * file, unsigned long offset,
+ unsigned long filesize));
+static int read_cluster_nonblocking(struct file * file, unsigned long offset,
+ unsigned long filesize)
+{
+ unsigned long pages = CLUSTER_PAGES;
+
+ offset = CLUSTER_OFFSET(offset);
+ while ((pages-- > 0) && (offset < filesize)) {
+ int error = page_cache_read(file, offset);
+ if (error < 0)
+ return error;
+ offset ++;
+ }
+
+ return 0;
+}
+#endif /* NO_MM */
+
+/*
+ * Knuth recommends primes in approximately golden ratio to the maximum
+ * integer representable by a machine word for multiplicative hashing.
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * These primes are chosen to be bit-sparse, that is operations on
+ * them can use shifts and additions instead of multiplications for
+ * machines where multiplications are slow.
+ */
+#if BITS_PER_LONG == 32
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e370001UL
+#elif BITS_PER_LONG == 64
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
+#else
+#error Define GOLDEN_RATIO_PRIME for your wordsize.
+#endif
+
+/*
+ * In order to wait for pages to become available there must be
+ * waitqueues associated with pages. By using a hash table of
+ * waitqueues where the bucket discipline is to maintain all
+ * waiters on the same queue and wake all when any of the pages
+ * become available, and for the woken contexts to check to be
+ * sure the appropriate page became available, this saves space
+ * at a cost of "thundering herd" phenomena during rare hash
+ * collisions.
+ */
+static inline wait_queue_head_t *page_waitqueue(struct page *page)
+{
+ const zone_t *zone = page_zone(page);
+ wait_queue_head_t *wait = zone->wait_table;
+ unsigned long hash = (unsigned long)page;
+
+ if (zone->wait_table_shift == BITS_PER_LONG)
+ return &wait[0];
+
+#if BITS_PER_LONG == 64
+ /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
+ unsigned long n = hash;
+ n <<= 18;
+ hash -= n;
+ n <<= 33;
+ hash -= n;
+ n <<= 3;
+ hash += n;
+ n <<= 3;
+ hash -= n;
+ n <<= 4;
+ hash += n;
+ n <<= 2;
+ hash += n;
+#else
+ /* On some cpus multiply is faster, on others gcc will do shifts */
+ hash *= GOLDEN_RATIO_PRIME;
+#endif
+ hash >>= zone->wait_table_shift;
+
+ return &wait[hash];
+}
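/*
 * Illustrative sketch (not part of the patch): the function above uses
 * multiplicative ("Fibonacci") hashing: multiply by a bit-sparse prime
 * close to 2^BITS_PER_LONG divided by the golden ratio, then keep the top
 * bits.  For a wait table with 2^bits buckets on a 32-bit machine the
 * index is effectively:
 */
static unsigned long wait_hash_sketch(unsigned long addr, unsigned int bits)
{
	return (addr * GOLDEN_RATIO_PRIME) >> (32 - bits);
}
/*
 * e.g. with a 256-entry table (bits == 8) the page structure addresses
 * 0xc1000000 and 0xc1001000, which differ only in one low-order bit, hash
 * to buckets 0xc1 and 0x31, so nearby addresses spread across the table
 * instead of clustering onto one waitqueue.  (On 64-bit the code above
 * replaces the multiply with the equivalent shift-and-add sequence.)
 */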
+
+/*
+ * Wait for a page to get unlocked.
+ *
+ * This must be called with the caller "holding" the page,
+ * ie with increased "page->count" so that the page won't
+ * go away during the wait..
+ *
+ * The waiting strategy is to get on a waitqueue determined
+ * by hashing. Waiters will then collide, and the newly woken
+ * task must then determine whether it was woken for the page
+ * it really wanted, and go back to sleep on the waitqueue if
+ * that wasn't it. With the waitqueue semantics, it never leaves
+ * the waitqueue unless it calls, so the loop moves forward one
+ * iteration every time there is
+ * (1) a collision
+ * and
+ * (2) one of the colliding pages is woken
+ *
+ * This is the thundering herd problem, but it is expected to
+ * be very rare due to the few pages that are actually being
+ * waited on at any given time and the quality of the hash function.
+ */
+void ___wait_on_page(struct page *page)
+{
+ wait_queue_head_t *waitqueue = page_waitqueue(page);
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+ add_wait_queue(waitqueue, &wait);
+ do {
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ if (!PageLocked(page))
+ break;
+ sync_page(page);
+ schedule();
+ } while (PageLocked(page));
+ __set_task_state(tsk, TASK_RUNNING);
+ remove_wait_queue(waitqueue, &wait);
+}
+
+/*
+ * unlock_page() is the other half of the story just above
+ * __wait_on_page(). Here a couple of quick checks are done
+ * and a couple of flags are set on the page, and then all
+ * of the waiters for all of the pages in the appropriate
+ * wait queue are woken.
+ */
+void unlock_page(struct page *page)
+{
+ wait_queue_head_t *waitqueue = page_waitqueue(page);
+ ClearPageLaunder(page);
+ smp_mb__before_clear_bit();
+ if (!test_and_clear_bit(PG_locked, &(page)->flags))
+ BUG();
+ smp_mb__after_clear_bit();
+
+ /*
+ * Although the default semantics of wake_up() are
+ * to wake all, here the specific function is used
+ * to make it even more explicit that a number of
+ * pages are being waited on here.
+ */
+ if (waitqueue_active(waitqueue))
+ wake_up_all(waitqueue);
+}
+
+/*
+ * Get a lock on the page, assuming we need to sleep
+ * to get it..
+ */
+static void __lock_page(struct page *page)
+{
+ wait_queue_head_t *waitqueue = page_waitqueue(page);
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+ add_wait_queue_exclusive(waitqueue, &wait);
+ for (;;) {
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ if (PageLocked(page)) {
+ sync_page(page);
+ schedule();
+ }
+ if (!TryLockPage(page))
+ break;
+ }
+ __set_task_state(tsk, TASK_RUNNING);
+ remove_wait_queue(waitqueue, &wait);
+}
+
+/*
+ * Get an exclusive lock on the page, optimistically
+ * assuming it's not locked..
+ */
+void lock_page(struct page *page)
+{
+ if (TryLockPage(page))
+ __lock_page(page);
+}
+
+/*
+ * a rather lightweight function, finding and getting a reference to a
+ * hashed page atomically.
+ */
+struct page * __find_get_page(struct address_space *mapping,
+ unsigned long offset, struct page **hash)
+{
+ struct page *page;
+
+ /*
+ * We scan the hash list read-only. Addition to and removal from
+ * the hash-list needs a held write-lock.
+ */
+ spin_lock(&pagecache_lock);
+ page = __find_page_nolock(mapping, offset, *hash);
+ if (page)
+ page_cache_get(page);
+ spin_unlock(&pagecache_lock);
+ return page;
+}
+
+/*
+ * Same as above, but trylock it instead of incrementing the count.
+ */
+struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
+{
+ struct page *page;
+ struct page **hash = page_hash(mapping, offset);
+
+ spin_lock(&pagecache_lock);
+ page = __find_page_nolock(mapping, offset, *hash);
+ if (page) {
+ if (TryLockPage(page))
+ page = NULL;
+ }
+ spin_unlock(&pagecache_lock);
+ return page;
+}
+
+/*
+ * Must be called with the pagecache lock held,
+ * will return with it held (but it may be dropped
+ * during blocking operations).
+ */
+static struct page * FASTCALL(__find_lock_page_helper(struct address_space *, unsigned long, struct page *));
+static struct page * __find_lock_page_helper(struct address_space *mapping,
+ unsigned long offset, struct page *hash)
+{
+ struct page *page;
+
+ /*
+ * We scan the hash list read-only. Addition to and removal from
+ * the hash-list needs a held write-lock.
+ */
+repeat:
+ page = __find_page_nolock(mapping, offset, hash);
+ if (page) {
+ page_cache_get(page);
+ if (TryLockPage(page)) {
+ spin_unlock(&pagecache_lock);
+ lock_page(page);
+ spin_lock(&pagecache_lock);
+
+ /* Has the page been re-allocated while we slept? */
+ if (page->mapping != mapping || page->index != offset) {
+ UnlockPage(page);
+ page_cache_release(page);
+ goto repeat;
+ }
+ }
+ }
+ return page;
+}
+
+/*
+ * Same as the above, but lock the page too, verifying that
+ * it's still valid once we own it.
+ */
+struct page * __find_lock_page (struct address_space *mapping,
+ unsigned long offset, struct page **hash)
+{
+ struct page *page;
+
+ spin_lock(&pagecache_lock);
+ page = __find_lock_page_helper(mapping, offset, *hash);
+ spin_unlock(&pagecache_lock);
+ return page;
+}
+
+/*
+ * Same as above, but create the page if required..
+ */
+struct page * find_or_create_page(struct address_space *mapping, unsigned long index, unsigned int gfp_mask)
+{
+ struct page *page;
+ struct page **hash = page_hash(mapping, index);
+
+ spin_lock(&pagecache_lock);
+ page = __find_lock_page_helper(mapping, index, *hash);
+ spin_unlock(&pagecache_lock);
+ if (!page) {
+ struct page *newpage = alloc_page(gfp_mask);
+ if (newpage) {
+ spin_lock(&pagecache_lock);
+ page = __find_lock_page_helper(mapping, index, *hash);
+ if (likely(!page)) {
+ page = newpage;
+ __add_to_page_cache(page, mapping, index, hash);
+ newpage = NULL;
+ }
+ spin_unlock(&pagecache_lock);
+ if (newpage == NULL)
+ lru_cache_add(page);
+ else
+ page_cache_release(newpage);
+ }
+ }
+ return page;
+}
+
+/*
+ * Same as grab_cache_page, but do not wait if the page is unavailable.
+ * This is intended for speculative data generators, where the data can
+ * be regenerated if the page couldn't be grabbed. This routine should
+ * be safe to call while holding the lock for another page.
+ */
+struct page *grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
+{
+ struct page *page, **hash;
+
+ hash = page_hash(mapping, index);
+ page = __find_get_page(mapping, index, hash);
+
+ if ( page ) {
+ if ( !TryLockPage(page) ) {
+ /* Page found and locked */
+ /* This test is overly paranoid, but what the heck... */
+ if ( unlikely(page->mapping != mapping || page->index != index) ) {
+ /* Someone reallocated this page under us. */
+ UnlockPage(page);
+ page_cache_release(page);
+ return NULL;
+ } else {
+ return page;
+ }
+ } else {
+ /* Page locked by someone else */
+ page_cache_release(page);
+ return NULL;
+ }
+ }
+
+ page = page_cache_alloc(mapping);
+ if ( unlikely(!page) )
+ return NULL; /* Failed to allocate a page */
+
+ if ( unlikely(add_to_page_cache_unique(page, mapping, index, hash)) ) {
+ /* Someone else grabbed the page already. */
+ page_cache_release(page);
+ return NULL;
+ }
+
+ return page;
+}
+
+#if 0
+#define PROFILE_READAHEAD
+#define DEBUG_READAHEAD
+#endif
+
+/*
+ * Read-ahead profiling information
+ * --------------------------------
+ * Every PROFILE_MAXREADCOUNT, the following information is written
+ * to the syslog:
+ * Percentage of asynchronous read-ahead.
+ * Average of read-ahead fields context value.
+ * If DEBUG_READAHEAD is defined, a snapshot of these fields is written
+ * to the syslog.
+ */
+
+#ifdef PROFILE_READAHEAD
+
+#define PROFILE_MAXREADCOUNT 1000
+
+static unsigned long total_reada;
+static unsigned long total_async;
+static unsigned long total_ramax;
+static unsigned long total_ralen;
+static unsigned long total_rawin;
+
+static void profile_readahead(int async, struct file *filp)
+{
+ unsigned long flags;
+
+ ++total_reada;
+ if (async)
+ ++total_async;
+
+ total_ramax += filp->f_ramax;
+ total_ralen += filp->f_ralen;
+ total_rawin += filp->f_rawin;
+
+ if (total_reada > PROFILE_MAXREADCOUNT) {
+ save_flags(flags);
+ cli();
+ if (!(total_reada > PROFILE_MAXREADCOUNT)) {
+ restore_flags(flags);
+ return;
+ }
+
+ printk("Readahead average: max=%ld, len=%ld, win=%ld, async=%ld%%\n",
+ total_ramax/total_reada,
+ total_ralen/total_reada,
+ total_rawin/total_reada,
+ (total_async*100)/total_reada);
+#ifdef DEBUG_READAHEAD
+ printk("Readahead snapshot: max=%ld, len=%ld, win=%ld, raend=%Ld\n",
+ filp->f_ramax, filp->f_ralen, filp->f_rawin, filp->f_raend);
+#endif
+
+ total_reada = 0;
+ total_async = 0;
+ total_ramax = 0;
+ total_ralen = 0;
+ total_rawin = 0;
+
+ restore_flags(flags);
+ }
+}
+#endif /* defined PROFILE_READAHEAD */
+
+/*
+ * Read-ahead context:
+ * -------------------
+ * The read ahead context fields of the "struct file" are the following:
+ * - f_raend : position of the first byte after the last page we tried to
+ * read ahead.
+ * - f_ramax : current read-ahead maximum size.
+ * - f_ralen : length of the current IO read block we tried to read-ahead.
+ * - f_rawin : length of the current read-ahead window.
+ * if last read-ahead was synchronous then
+ * f_rawin = f_ralen
+ * otherwise (was asynchronous)
+ * f_rawin = previous value of f_ralen + f_ralen
+ *
+ * Read-ahead limits:
+ * ------------------
+ * MIN_READAHEAD : minimum read-ahead size when read-ahead.
+ * MAX_READAHEAD : maximum read-ahead size when read-ahead.
+ *
+ * Synchronous read-ahead benefits:
+ * --------------------------------
+ * Using a reasonable IO transfer length from peripheral devices increases
+ * system performance.
+ * Reasonable means, in this context, not too large but not too small.
+ * The actual maximum value is:
+ * MAX_READAHEAD + PAGE_CACHE_SIZE = 76k if CONFIG_READA_SMALL is undefined
+ * and 32k if it is defined (4K page size assumed).
+ *
+ * Asynchronous read-ahead benefits:
+ * ---------------------------------
+ * Overlapping the next read request with user process execution increases
+ * system performance.
+ *
+ * Read-ahead risks:
+ * -----------------
+ * We have to guess which further data are needed by the user process.
+ * If these data are often not really needed, it's bad for system
+ * performance.
+ * However, we know that files are often accessed sequentially by
+ * application programs and it seems that it is possible to have some good
+ * strategy in that guessing.
+ * We only try to read-ahead files that seem to be read sequentially.
+ *
+ * Asynchronous read-ahead risks:
+ * ------------------------------
+ * In order to maximize overlapping, we must start some asynchronous read
+ * request from the device, as soon as possible.
+ * We must be very careful about:
+ * - The number of effective pending IO read requests.
+ * ONE seems to be the only reasonable value.
+ * - The total memory pool usage for the file access stream.
+ * This maximum memory usage is implicitly 2 IO read chunks:
+ * 2*(MAX_READAHEAD + PAGE_CACHE_SIZE) = 156K if CONFIG_READA_SMALL is undefined,
+ * 64k if defined (4K page size assumed).
+ */
+
+static inline int get_max_readahead(struct inode * inode)
+{
+ if (!inode->i_dev || !max_readahead[MAJOR(inode->i_dev)])
+ return vm_max_readahead;
+ return max_readahead[MAJOR(inode->i_dev)][MINOR(inode->i_dev)];
+}
+
+static void generic_file_readahead(int reada_ok,
+ struct file * filp, struct inode * inode,
+ struct page * page)
+{
+ unsigned long end_index;
+ unsigned long index = page->index;
+ unsigned long max_ahead, ahead;
+ unsigned long raend;
+ int max_readahead = get_max_readahead(inode);
+
+ end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+
+ raend = filp->f_raend;
+ max_ahead = 0;
+
+/*
+ * The current page is locked.
+ * If the current position is inside the previous read IO request, do not
+ * try to reread previously read ahead pages.
+ * Otherwise decide whether or not to read ahead some pages synchronously.
+ * If we are not going to read ahead, set the read ahead context for this
+ * page only.
+ */
+ if (PageLocked(page)) {
+ if (!filp->f_ralen || index >= raend || index + filp->f_rawin < raend) {
+ raend = index;
+ if (raend < end_index)
+ max_ahead = filp->f_ramax;
+ filp->f_rawin = 0;
+ filp->f_ralen = 1;
+ if (!max_ahead) {
+ filp->f_raend = index + filp->f_ralen;
+ filp->f_rawin += filp->f_ralen;
+ }
+ }
+ }
+/*
+ * The current page is not locked.
+ * If we were reading ahead and,
+ * if the current max read ahead size is not zero and,
+ * if the current position is inside the last read-ahead IO request,
+ * it is the moment to try to read ahead asynchronously.
+ * We will later force unplug device in order to force asynchronous read IO.
+ */
+ else if (reada_ok && filp->f_ramax && raend >= 1 &&
+ index <= raend && index + filp->f_ralen >= raend) {
+/*
+ * Add ONE page to max_ahead in order to try to have about the same IO max size
+ * as synchronous read-ahead (MAX_READAHEAD + 1)*PAGE_CACHE_SIZE.
+ * Compute the position of the last page we have tried to read in order to
+ * begin to read ahead just at the next page.
+ */
+ raend -= 1;
+ if (raend < end_index)
+ max_ahead = filp->f_ramax + 1;
+
+ if (max_ahead) {
+ filp->f_rawin = filp->f_ralen;
+ filp->f_ralen = 0;
+ reada_ok = 2;
+ }
+ }
+/*
+ * Try to read ahead pages.
+ * We hope that ll_rw_blk() plug/unplug, coalescence, request sorting and the
+ * scheduler will work well enough for us to avoid too many bad actual IO requests.
+ */
+ ahead = 0;
+ while (ahead < max_ahead) {
+ ahead ++;
+ if ((raend + ahead) >= end_index)
+ break;
+ if (page_cache_read(filp, raend + ahead) < 0)
+ break;
+ }
+/*
+ * If we tried to read ahead some pages,
+ * If we tried to read ahead asynchronously,
+ * Try to force unplug of the device in order to start an asynchronous
+ * read IO request.
+ * Update the read-ahead context.
+ * Store the length of the current read-ahead window.
+ * Double the current max read ahead size.
+ * That heuristic avoids doing large IO for files that are not really
+ * accessed sequentially.
+ */
+ if (ahead) {
+ filp->f_ralen += ahead;
+ filp->f_rawin += filp->f_ralen;
+ filp->f_raend = raend + ahead + 1;
+
+ filp->f_ramax += filp->f_ramax;
+
+ if (filp->f_ramax > max_readahead)
+ filp->f_ramax = max_readahead;
+
+#ifdef PROFILE_READAHEAD
+ profile_readahead((reada_ok == 2), filp);
+#endif
+ }
+
+ return;
+}
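/*
 * Illustrative sketch (not part of the patch): on a sustained sequential
 * read the window doubles after every successful read-ahead pass and is
 * clamped at the per-device maximum (vm_max_readahead when no per-device
 * limit is registered).  With the defaults above, vm_min_readahead == 3
 * and vm_max_readahead == 31:
 */
static void readahead_window_growth_sketch(void)
{
	unsigned long ramax = 3;		/* first pass: minimum window */
	const unsigned long max_readahead = 31;

	while (ramax < max_readahead) {
		ramax += ramax;			/* filp->f_ramax += filp->f_ramax */
		if (ramax > max_readahead)
			ramax = max_readahead;	/* clamp to the device maximum */
	}
	/* the window grows 3 -> 6 -> 12 -> 24 -> 31 pages of read-ahead */
}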
+
+/*
+ * Mark a page as having seen activity.
+ *
+ * If it was already so marked, move it to the active queue and drop
+ * the referenced bit. Otherwise, just mark it for future action..
+ */
+void mark_page_accessed(struct page *page)
+{
+ if (!PageActive(page) && PageReferenced(page)) {
+ activate_page(page);
+ ClearPageReferenced(page);
+ } else
+ SetPageReferenced(page);
+}
+
+/*
+ * This is a generic file read routine, and uses the
+ * inode->i_op->readpage() function for the actual low-level
+ * stuff.
+ *
+ * This is really ugly. But the goto's actually try to clarify some
+ * of the logic when it comes to error handling etc.
+ */
+void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor)
+{
+ struct address_space *mapping = filp->f_dentry->d_inode->i_mapping;
+ struct inode *inode = mapping->host;
+ unsigned long index, offset;
+ struct page *cached_page;
+ int reada_ok;
+ int error;
+ int max_readahead = get_max_readahead(inode);
+
+ cached_page = NULL;
+ index = *ppos >> PAGE_CACHE_SHIFT;
+ offset = *ppos & ~PAGE_CACHE_MASK;
+
+/*
+ * If the current position is outside the previous read-ahead window,
+ * we reset the current read-ahead context and set read ahead max to zero
+ * (will be set to just needed value later),
+ * otherwise, we assume that the file accesses are sequential enough to
+ * continue read-ahead.
+ */
+#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
+ if (1 || index > filp->f_raend || index + filp->f_rawin < filp->f_raend) {
+#else
+ if (index > filp->f_raend || index + filp->f_rawin < filp->f_raend) {
+#endif
+ reada_ok = 0;
+ filp->f_raend = 0;
+ filp->f_ralen = 0;
+ filp->f_ramax = 0;
+ filp->f_rawin = 0;
+ } else {
+ reada_ok = 1;
+ }
+/*
+ * Adjust the current value of read-ahead max.
+ * If the read operation stays in the first half page, force no readahead.
+ * Otherwise try to increase read ahead max just enough to do the read request.
+ * Then, at least MIN_READAHEAD if read ahead is ok,
+ * and at most MAX_READAHEAD in all cases.
+ */
+ if (!index && offset + desc->count <= (PAGE_CACHE_SIZE >> 1)) {
+ filp->f_ramax = 0;
+ } else {
+ unsigned long needed;
+
+ needed = ((offset + desc->count) >> PAGE_CACHE_SHIFT) + 1;
+
+ if (filp->f_ramax < needed)
+ filp->f_ramax = needed;
+
+ if (reada_ok && filp->f_ramax < vm_min_readahead)
+ filp->f_ramax = vm_min_readahead;
+ if (filp->f_ramax > max_readahead)
+ filp->f_ramax = max_readahead;
+ }
+
+ for (;;) {
+ struct page *page, **hash;
+ unsigned long end_index, nr, ret;
+
+ end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+
+ if (index > end_index)
+ break;
+ nr = PAGE_CACHE_SIZE;
+ if (index == end_index) {
+ nr = inode->i_size & ~PAGE_CACHE_MASK;
+ if (nr <= offset)
+ break;
+ }
+
+ nr = nr - offset;
+
+ /*
+ * Try to find the data in the page cache..
+ */
+ hash = page_hash(mapping, index);
+
+ spin_lock(&pagecache_lock);
+ page = __find_page_nolock(mapping, index, *hash);
+ if (!page)
+ goto no_cached_page;
+found_page:
+ page_cache_get(page);
+ spin_unlock(&pagecache_lock);
+
+ if (!Page_Uptodate(page))
+ goto page_not_up_to_date;
+ generic_file_readahead(reada_ok, filp, inode, page);
+page_ok:
+#ifndef NO_MM
+ /* If users can be writing to this page using arbitrary
+ * virtual addresses, take care about potential aliasing
+ * before reading the page on the kernel side.
+ */
+ if (mapping->i_mmap_shared != NULL)
+ flush_dcache_page(page);
+#endif /* NO_MM */
+
+ /*
+ * Mark the page accessed if we read the
+ * beginning or we just did an lseek.
+ */
+ if (!offset || !filp->f_reada)
+ mark_page_accessed(page);
+
+ /*
+ * Ok, we have the page, and it's up-to-date, so
+ * now we can copy it to user space...
+ *
+ * The actor routine returns how many bytes were actually used..
+ * NOTE! This may not be the same as how much of a user buffer
+ * we filled up (we may be padding etc), so we can only update
+ * "pos" here (the actor routine has to update the user buffer
+ * pointers and the remaining count).
+ */
+ ret = actor(desc, page, offset, nr);
+ offset += ret;
+ index += offset >> PAGE_CACHE_SHIFT;
+ offset &= ~PAGE_CACHE_MASK;
+
+ page_cache_release(page);
+ if (ret == nr && desc->count)
+ continue;
+ break;
+
+/*
+ * Ok, the page was not immediately readable, so let's try to read ahead while we're at it..
+ */
+page_not_up_to_date:
+ generic_file_readahead(reada_ok, filp, inode, page);
+
+ if (Page_Uptodate(page))
+ goto page_ok;
+
+ /* Get exclusive access to the page ... */
+ lock_page(page);
+
+ /* Did it get unhashed before we got the lock? */
+ if (!page->mapping) {
+ UnlockPage(page);
+ page_cache_release(page);
+ continue;
+ }
+
+ /* Did somebody else fill it already? */
+ if (Page_Uptodate(page)) {
+ UnlockPage(page);
+ goto page_ok;
+ }
+
+readpage:
+ /* ... and start the actual read. The read will unlock the page. */
+ error = mapping->a_ops->readpage(filp, page);
+
+ if (!error) {
+ if (Page_Uptodate(page))
+ goto page_ok;
+
+ /* Again, try some read-ahead while waiting for the page to finish.. */
+ generic_file_readahead(reada_ok, filp, inode, page);
+ wait_on_page(page);
+ if (Page_Uptodate(page))
+ goto page_ok;
+ error = -EIO;
+ }
+
+ /* UHHUH! A synchronous read error occurred. Report it */
+ desc->error = error;
+ page_cache_release(page);
+ break;
+
+no_cached_page:
+ /*
+ * Ok, it wasn't cached, so we need to create a new
+ * page..
+ *
+ * We get here with the page cache lock held.
+ */
+ if (!cached_page) {
+ spin_unlock(&pagecache_lock);
+ cached_page = page_cache_alloc(mapping);
+ if (!cached_page) {
+ desc->error = -ENOMEM;
+ break;
+ }
+
+ /*
+ * Somebody may have added the page while we
+ * dropped the page cache lock. Check for that.
+ */
+ spin_lock(&pagecache_lock);
+ page = __find_page_nolock(mapping, index, *hash);
+ if (page)
+ goto found_page;
+ }
+
+ /*
+ * Ok, add the new page to the hash-queues...
+ */
+ page = cached_page;
+ __add_to_page_cache(page, mapping, index, hash);
+ spin_unlock(&pagecache_lock);
+ lru_cache_add(page);
+ cached_page = NULL;
+
+ goto readpage;
+ }
+
+ *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+ filp->f_reada = 1;
+ if (cached_page)
+ page_cache_release(cached_page);
+ UPDATE_ATIME(inode);
+}
+
+static ssize_t generic_file_direct_IO(int rw, struct file * filp, char * buf, size_t count, loff_t offset)
+{
+ ssize_t retval;
+ int new_iobuf, chunk_size, blocksize_mask, blocksize, blocksize_bits, iosize, progress;
+ struct kiobuf * iobuf;
+ struct address_space * mapping = filp->f_dentry->d_inode->i_mapping;
+ struct inode * inode = mapping->host;
+ loff_t size = inode->i_size;
+
+ new_iobuf = 0;
+ iobuf = filp->f_iobuf;
+ if (test_and_set_bit(0, &filp->f_iobuf_lock)) {
+ /*
+ * A parallel read/write is using the preallocated iobuf
+ * so just run slow and allocate a new one.
+ */
+ retval = alloc_kiovec(1, &iobuf);
+ if (retval)
+ goto out;
+ new_iobuf = 1;
+ }
+
+ blocksize = 1 << inode->i_blkbits;
+ blocksize_bits = inode->i_blkbits;
+ blocksize_mask = blocksize - 1;
+ chunk_size = KIO_MAX_ATOMIC_IO << 10;
+
+ retval = -EINVAL;
+ if ((offset & blocksize_mask) || (count & blocksize_mask))
+ goto out_free;
+ if (!mapping->a_ops->direct_IO)
+ goto out_free;
+
+ if ((rw == READ) && (offset + count > size))
+ count = size - offset;
+
+ /*
+ * Flush to disk exclusively the _data_, metadata must remain
+ * completely asynchronous or performance will go to /dev/null.
+ */
+ retval = filemap_fdatasync(mapping);
+ if (retval == 0)
+ retval = fsync_inode_data_buffers(inode);
+ if (retval == 0)
+ retval = filemap_fdatawait(mapping);
+ if (retval < 0)
+ goto out_free;
+
+ progress = retval = 0;
+ while (count > 0) {
+ iosize = count;
+ if (iosize > chunk_size)
+ iosize = chunk_size;
+
+ retval = map_user_kiobuf(rw, iobuf, (unsigned long) buf, iosize);
+ if (retval)
+ break;
+
+ retval = mapping->a_ops->direct_IO(rw, inode, iobuf, (offset+progress) >> blocksize_bits, blocksize);
+
+ if (rw == READ && retval > 0)
+ mark_dirty_kiobuf(iobuf, retval);
+
+ if (retval >= 0) {
+ count -= retval;
+ buf += retval;
+ /* warning: weird semantics here, we're reporting a read behind the end of the file */
+ progress += retval;
+ }
+
+ unmap_kiobuf(iobuf);
+
+ if (retval != iosize)
+ break;
+ }
+
+ if (progress)
+ retval = progress;
+
+ out_free:
+ if (!new_iobuf)
+ clear_bit(0, &filp->f_iobuf_lock);
+ else
+ free_kiovec(1, &iobuf);
+ out:
+ return retval;
+}
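/*
 * Illustrative sketch (not part of the patch): the -EINVAL check above
 * means O_DIRECT transfers must be aligned to the inode's block size on
 * both ends.  For example, with 1 KB filesystem blocks (i_blkbits == 10):
 */
static int direct_io_alignment_sketch(loff_t offset, size_t count, unsigned int i_blkbits)
{
	unsigned long blocksize_mask = (1UL << i_blkbits) - 1;

	/* offset 4096 / count 8192 pass, while offset 512 or count 1536
	 * would be rejected with -EINVAL before any IO is issued */
	return !((offset & blocksize_mask) || (count & blocksize_mask));
}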
+
+int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
+{
+ char *kaddr;
+ unsigned long left, count = desc->count;
+
+ if (size > count)
+ size = count;
+
+ kaddr = kmap(page);
+ left = __copy_to_user(desc->buf, kaddr + offset, size);
+ kunmap(page);
+
+ if (left) {
+ size -= left;
+ desc->error = -EFAULT;
+ }
+ desc->count = count - size;
+ desc->written += size;
+ desc->buf += size;
+ return size;
+}
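+
+/*
+ * Illustrative sketch (not from this source): do_generic_file_read()
+ * calls an "actor" such as the one above once per page-cache page.  A
+ * minimal hypothetical actor that merely counts the bytes offered to it
+ * would follow the same contract:
+ *
+ *	static int count_actor(read_descriptor_t *desc, struct page *page,
+ *			       unsigned long offset, unsigned long size)
+ *	{
+ *		if (size > desc->count)
+ *			size = desc->count;
+ *		desc->count -= size;
+ *		desc->written += size;
+ *		return size;
+ *	}
+ *
+ * Returning fewer bytes than offered, or setting desc->error, stops the
+ * read loop.
+ */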
+
+/*
+ * This is the "read()" routine for all filesystems
+ * that can use the page cache directly.
+ */
+ssize_t generic_file_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
+{
+ ssize_t retval;
+
+ if ((ssize_t) count < 0)
+ return -EINVAL;
+
+ if (filp->f_flags & O_DIRECT)
+ goto o_direct;
+
+ retval = -EFAULT;
+ if (access_ok(VERIFY_WRITE, buf, count)) {
+ retval = 0;
+
+ if (count) {
+ read_descriptor_t desc;
+
+ desc.written = 0;
+ desc.count = count;
+ desc.buf = buf;
+ desc.error = 0;
+ do_generic_file_read(filp, ppos, &desc, file_read_actor);
+
+ retval = desc.written;
+ if (!retval)
+ retval = desc.error;
+ }
+ }
+ out:
+ return retval;
+
+ o_direct:
+ {
+ loff_t pos = *ppos, size;
+ struct address_space *mapping = filp->f_dentry->d_inode->i_mapping;
+ struct inode *inode = mapping->host;
+
+ retval = 0;
+ if (!count)
+ goto out; /* skip atime */
+ size = inode->i_size;
+ if (pos < size) {
+ retval = generic_file_direct_IO(READ, filp, buf, count, pos);
+ if (retval > 0)
+ *ppos = pos + retval;
+ }
+ UPDATE_ATIME(filp->f_dentry->d_inode);
+ goto out;
+ }
+}
+
+static int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset , unsigned long size)
+{
+ ssize_t written;
+ unsigned long count = desc->count;
+ struct file *file = (struct file *) desc->buf;
+
+ if (size > count)
+ size = count;
+
+ if (file->f_op->sendpage) {
+ written = file->f_op->sendpage(file, page, offset,
+ size, &file->f_pos, size<count);
+ } else {
+ char *kaddr;
+ mm_segment_t old_fs;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ kaddr = kmap(page);
+ written = file->f_op->write(file, kaddr + offset, size, &file->f_pos);
+ kunmap(page);
+
+ set_fs(old_fs);
+ }
+ if (written < 0) {
+ desc->error = written;
+ written = 0;
+ }
+ desc->count = count - written;
+ desc->written += written;
+ return written;
+}
+
+asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset, size_t count)
+{
+ ssize_t retval;
+ struct file * in_file, * out_file;
+ struct inode * in_inode, * out_inode;
+
+ /*
+ * Get input file, and verify that it is ok..
+ */
+ retval = -EBADF;
+ in_file = fget(in_fd);
+ if (!in_file)
+ goto out;
+ if (!(in_file->f_mode & FMODE_READ))
+ goto fput_in;
+ retval = -EINVAL;
+ in_inode = in_file->f_dentry->d_inode;
+ if (!in_inode)
+ goto fput_in;
+ if (!in_inode->i_mapping->a_ops->readpage)
+ goto fput_in;
+ retval = locks_verify_area(FLOCK_VERIFY_READ, in_inode, in_file, in_file->f_pos, count);
+ if (retval)
+ goto fput_in;
+
+ /*
+ * Get output file, and verify that it is ok..
+ */
+ retval = -EBADF;
+ out_file = fget(out_fd);
+ if (!out_file)
+ goto fput_in;
+ if (!(out_file->f_mode & FMODE_WRITE))
+ goto fput_out;
+ retval = -EINVAL;
+ if (!out_file->f_op || !out_file->f_op->write)
+ goto fput_out;
+ out_inode = out_file->f_dentry->d_inode;
+ retval = locks_verify_area(FLOCK_VERIFY_WRITE, out_inode, out_file, out_file->f_pos, count);
+ if (retval)
+ goto fput_out;
+
+ retval = 0;
+ if (count) {
+ read_descriptor_t desc;
+ loff_t pos = 0, *ppos;
+
+ retval = -EFAULT;
+ ppos = &in_file->f_pos;
+ if (offset) {
+ if (get_user(pos, offset))
+ goto fput_out;
+ ppos = &pos;
+ }
+
+ desc.written = 0;
+ desc.count = count;
+ desc.buf = (char *) out_file;
+ desc.error = 0;
+ do_generic_file_read(in_file, ppos, &desc, file_send_actor);
+
+ retval = desc.written;
+ if (!retval)
+ retval = desc.error;
+ if (offset)
+ put_user(pos, offset);
+ }
+
+fput_out:
+ fput(out_file);
+fput_in:
+ fput(in_file);
+out:
+ return retval;
+}
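+
+/*
+ * Illustrative user-space sketch (not from this source): sendfile(2) as
+ * implemented above needs an input file backed by the page cache
+ * (->readpage) and an output file with a ->write method, e.g. copying a
+ * regular file to a socket without a user-space bounce buffer:
+ *
+ *	off_t off = 0;
+ *	ssize_t sent = sendfile(sock_fd, file_fd, &off, file_size);
+ *
+ * sock_fd, file_fd and file_size are assumptions for the example; when
+ * an offset pointer is passed, it is advanced instead of the input
+ * file's f_pos, exactly as in the code above.
+ */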
+
+#ifndef NO_MM
+
+static ssize_t do_readahead(struct file *file, unsigned long index, unsigned long nr)
+{
+ struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
+ unsigned long max;
+
+ if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
+ return -EINVAL;
+
+ /* Limit it to the size of the file.. */
+ max = (mapping->host->i_size + ~PAGE_CACHE_MASK) >> PAGE_CACHE_SHIFT;
+ if (index > max)
+ return 0;
+ max -= index;
+ if (nr > max)
+ nr = max;
+
+ /* And limit it to a sane percentage of the inactive list.. */
+ max = nr_inactive_pages / 2;
+ if (nr > max)
+ nr = max;
+
+ while (nr) {
+ page_cache_read(file, index);
+ index++;
+ nr--;
+ }
+ return 0;
+}
+
+asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
+{
+ ssize_t ret;
+ struct file *file;
+
+ ret = -EBADF;
+ file = fget(fd);
+ if (file) {
+ if (file->f_mode & FMODE_READ) {
+ unsigned long start = offset >> PAGE_CACHE_SHIFT;
+ unsigned long len = (count + ((long)offset & ~PAGE_CACHE_MASK)) >> PAGE_CACHE_SHIFT;
+ ret = do_readahead(file, start, len);
+ }
+ fput(file);
+ }
+ return ret;
+}
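+
+/*
+ * Illustrative user-space sketch (not from this source): readahead(2)
+ * only populates the page cache for the requested range and returns
+ * without waiting for the reads to finish, so it is typically issued
+ * just before a sequential pass over the file:
+ *
+ *	ssize_t r = readahead(fd, 0, 1024 * 1024);
+ *
+ * fd is an assumed, already open descriptor; the 1 MiB length is
+ * silently clamped to the file size and to half the inactive list, as
+ * done in do_readahead() above.
+ */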
+
+/*
+ * Read-ahead and flush behind for MADV_SEQUENTIAL areas. Since we are
+ * sure this is sequential access, we don't need a flexible read-ahead
+ * window size -- we can always use a large fixed size window.
+ */
+static void nopage_sequential_readahead(struct vm_area_struct * vma,
+ unsigned long pgoff, unsigned long filesize)
+{
+ unsigned long ra_window;
+
+ ra_window = get_max_readahead(vma->vm_file->f_dentry->d_inode);
+ ra_window = CLUSTER_OFFSET(ra_window + CLUSTER_PAGES - 1);
+
+ /* vm_raend is zero if we haven't read ahead in this area yet. */
+ if (vma->vm_raend == 0)
+ vma->vm_raend = vma->vm_pgoff + ra_window;
+
+ /*
+ * If we've just faulted the page half-way through our window,
+ * then schedule reads for the next window, and release the
+ * pages in the previous window.
+ */
+ if ((pgoff + (ra_window >> 1)) == vma->vm_raend) {
+ unsigned long start = vma->vm_pgoff + vma->vm_raend;
+ unsigned long end = start + ra_window;
+
+ if (end > ((vma->vm_end >> PAGE_SHIFT) + vma->vm_pgoff))
+ end = (vma->vm_end >> PAGE_SHIFT) + vma->vm_pgoff;
+ if (start > end)
+ return;
+
+ while ((start < end) && (start < filesize)) {
+ if (read_cluster_nonblocking(vma->vm_file,
+ start, filesize) < 0)
+ break;
+ start += CLUSTER_PAGES;
+ }
+ run_task_queue(&tq_disk);
+
+ /* if we're far enough past the beginning of this area,
+ recycle pages that are in the previous window. */
+ if (vma->vm_raend > (vma->vm_pgoff + ra_window + ra_window)) {
+ unsigned long window = ra_window << PAGE_SHIFT;
+
+ end = vma->vm_start + (vma->vm_raend << PAGE_SHIFT);
+ end -= window + window;
+ filemap_sync(vma, end - window, window, MS_INVALIDATE);
+ }
+
+ vma->vm_raend += ra_window;
+ }
+
+ return;
+}
+
+/*
+ * filemap_nopage() is invoked via the vma operations vector for a
+ * mapped memory region to read in file data during a page fault.
+ *
+ * The goto's are kind of ugly, but this streamlines the normal case of having
+ * it in the page cache, and handles the special cases reasonably without
+ * having a lot of duplicated code.
+ */
+struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int unused)
+{
+ int error;
+ struct file *file = area->vm_file;
+ struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
+ struct inode *inode = mapping->host;
+ struct page *page, **hash;
+ unsigned long size, pgoff, endoff;
+
+ pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
+ endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
+
+retry_all:
+ /*
+ * An external ptracer can access pages that normally aren't
+ * accessible..
+ */
+ size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ if ((pgoff >= size) && (area->vm_mm == current->mm))
+ return NULL;
+
+ /* The "size" of the file, as far as mmap is concerned, isn't bigger than the mapping */
+ if (size > endoff)
+ size = endoff;
+
+ /*
+ * Do we have something in the page cache already?
+ */
+ hash = page_hash(mapping, pgoff);
+retry_find:
+ page = __find_get_page(mapping, pgoff, hash);
+ if (!page)
+ goto no_cached_page;
+
+ /*
+ * Ok, found a page in the page cache, now we need to check
+ * that it's up-to-date.
+ */
+ if (!Page_Uptodate(page))
+ goto page_not_uptodate;
+
+success:
+ /*
+ * Try read-ahead for sequential areas.
+ */
+ if (VM_SequentialReadHint(area))
+ nopage_sequential_readahead(area, pgoff, size);
+
+ /*
+ * Found the page and have a reference on it, need to check sharing
+ * and possibly copy it over to another page..
+ */
+ mark_page_accessed(page);
+ flush_page_to_ram(page);
+ return page;
+
+no_cached_page:
+ /*
+ * If the requested offset is within our file, try to read a whole
+ * cluster of pages at once.
+ *
+ * Otherwise, we're off the end of a privately mapped file,
+ * so we need to map a zero page.
+ */
+ if ((pgoff < size) && !VM_RandomReadHint(area))
+ error = read_cluster_nonblocking(file, pgoff, size);
+ else
+ error = page_cache_read(file, pgoff);
+
+ /*
+ * The page we want has now been added to the page cache.
+ * In the unlikely event that someone removed it in the
+ * meantime, we'll just come back here and read it again.
+ */
+ if (error >= 0)
+ goto retry_find;
+
+ /*
+ * An error return from page_cache_read can result if the
+ * system is low on memory, or a problem occurs while trying
+ * to schedule I/O.
+ */
+ if (error == -ENOMEM)
+ return NOPAGE_OOM;
+ return NULL;
+
+page_not_uptodate:
+ lock_page(page);
+
+ /* Did it get unhashed while we waited for it? */
+ if (!page->mapping) {
+ UnlockPage(page);
+ page_cache_release(page);
+ goto retry_all;
+ }
+
+ /* Did somebody else get it up-to-date? */
+ if (Page_Uptodate(page)) {
+ UnlockPage(page);
+ goto success;
+ }
+
+ if (!mapping->a_ops->readpage(file, page)) {
+ wait_on_page(page);
+ if (Page_Uptodate(page))
+ goto success;
+ }
+
+ /*
+ * Umm, take care of errors if the page isn't up-to-date.
+ * Try to re-read it _once_. We do this synchronously,
+ * because there really aren't any performance issues here
+ * and we need to check for errors.
+ */
+ lock_page(page);
+
+ /* Somebody truncated the page on us? */
+ if (!page->mapping) {
+ UnlockPage(page);
+ page_cache_release(page);
+ goto retry_all;
+ }
+
+ /* Somebody else successfully read it in? */
+ if (Page_Uptodate(page)) {
+ UnlockPage(page);
+ goto success;
+ }
+ ClearPageError(page);
+ if (!mapping->a_ops->readpage(file, page)) {
+ wait_on_page(page);
+ if (Page_Uptodate(page))
+ goto success;
+ }
+
+ /*
+ * Things didn't work out. Return zero to tell the
+ * mm layer so, possibly freeing the page cache page first.
+ */
+ page_cache_release(page);
+ return NULL;
+}
+
+/* Called with mm->page_table_lock held to protect against other
+ * threads/the swapper from ripping pte's out from under us.
+ */
+static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags)
+{
+ pte_t pte = *ptep;
+
+ if (pte_present(pte)) {
+ struct page *page = pte_page(pte);
+ if (VALID_PAGE(page) && !PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
+ flush_tlb_page(vma, address);
+ set_page_dirty(page);
+ }
+ }
+ return 0;
+}
+
+static inline int filemap_sync_pte_range(pmd_t * pmd,
+ unsigned long address, unsigned long size,
+ struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
+{
+ pte_t * pte;
+ unsigned long end;
+ int error;
+
+ if (pmd_none(*pmd))
+ return 0;
+ if (pmd_bad(*pmd)) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ return 0;
+ }
+ pte = pte_offset(pmd, address);
+ offset += address & PMD_MASK;
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ error = 0;
+ do {
+ error |= filemap_sync_pte(pte, vma, address + offset, flags);
+ address += PAGE_SIZE;
+ pte++;
+ } while (address && (address < end));
+ return error;
+}
+
+static inline int filemap_sync_pmd_range(pgd_t * pgd,
+ unsigned long address, unsigned long size,
+ struct vm_area_struct *vma, unsigned int flags)
+{
+ pmd_t * pmd;
+ unsigned long offset, end;
+ int error;
+
+ if (pgd_none(*pgd))
+ return 0;
+ if (pgd_bad(*pgd)) {
+ pgd_ERROR(*pgd);
+ pgd_clear(pgd);
+ return 0;
+ }
+ pmd = pmd_offset(pgd, address);
+ offset = address & PGDIR_MASK;
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ error = 0;
+ do {
+ error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return error;
+}
+
+int filemap_sync(struct vm_area_struct * vma, unsigned long address,
+ size_t size, unsigned int flags)
+{
+ pgd_t * dir;
+ unsigned long end = address + size;
+ int error = 0;
+
+	/* Acquire the lock early; it may be possible to avoid dropping
+	 * and reacquiring it repeatedly.
+ */
+ spin_lock(&vma->vm_mm->page_table_lock);
+
+ dir = pgd_offset(vma->vm_mm, address);
+ flush_cache_range(vma->vm_mm, end - size, end);
+ if (address >= end)
+ BUG();
+ do {
+ error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ } while (address && (address < end));
+ flush_tlb_range(vma->vm_mm, end - size, end);
+
+ spin_unlock(&vma->vm_mm->page_table_lock);
+
+ return error;
+}
+
+static struct vm_operations_struct generic_file_vm_ops = {
+ nopage: filemap_nopage,
+};
+
+#endif /* NO_MM */
+
+/* This is used for a general mmap of a disk file */
+
+int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
+{
+#ifndef NO_MM
+ struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
+ struct inode *inode = mapping->host;
+
+ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
+ if (!mapping->a_ops->writepage)
+ return -EINVAL;
+ }
+ if (!mapping->a_ops->readpage)
+ return -ENOEXEC;
+ UPDATE_ATIME(inode);
+ vma->vm_ops = &generic_file_vm_ops;
+ return 0;
+#else /* NO_MM */
+ /* DAVIDM should do something here to get shared mappings working */
+ if (vma->vm_flags & VM_MAYWRITE)
+ return(-ENOSYS);
+ return 0;
+#endif /* NO_MM */
+}
+
+#ifndef NO_MM
+/*
+ * The msync() system call.
+ */
+
+/*
+ * MS_SYNC syncs the entire file - including mappings.
+ *
+ * MS_ASYNC initiates writeout of just the dirty mapped data.
+ * This provides no guarantee of file integrity - things like indirect
+ * blocks may not have started writeout. MS_ASYNC is primarily useful
+ * where the application knows that it has finished with the data and
+ * wishes to intelligently schedule its own I/O traffic.
+ */
+static int msync_interval(struct vm_area_struct * vma,
+ unsigned long start, unsigned long end, int flags)
+{
+ int ret = 0;
+ struct file * file = vma->vm_file;
+
+ if ( (flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED) )
+ return -EBUSY;
+
+ if (file && (vma->vm_flags & VM_SHARED)) {
+ ret = filemap_sync(vma, start, end-start, flags);
+
+ if (!ret && (flags & (MS_SYNC|MS_ASYNC))) {
+ struct inode * inode = file->f_dentry->d_inode;
+
+ down(&inode->i_sem);
+ ret = filemap_fdatasync(inode->i_mapping);
+ if (flags & MS_SYNC) {
+ int err;
+
+ if (file->f_op && file->f_op->fsync) {
+ err = file->f_op->fsync(file, file->f_dentry, 1);
+ if (err && !ret)
+ ret = err;
+ }
+ err = filemap_fdatawait(inode->i_mapping);
+ if (err && !ret)
+ ret = err;
+ }
+ up(&inode->i_sem);
+ }
+ }
+ return ret;
+}
+
+#endif /* NO_MM */
+
+asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
+{
+#ifndef NO_MM
+ unsigned long end;
+ struct vm_area_struct * vma;
+ int unmapped_error, error = -EINVAL;
+
+ down_read(&current->mm->mmap_sem);
+ if (start & ~PAGE_MASK)
+ goto out;
+ len = (len + ~PAGE_MASK) & PAGE_MASK;
+ end = start + len;
+ if (end < start)
+ goto out;
+ if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
+ goto out;
+ if ((flags & MS_ASYNC) && (flags & MS_SYNC))
+ goto out;
+
+ error = 0;
+ if (end == start)
+ goto out;
+ /*
+ * If the interval [start,end) covers some unmapped address ranges,
+ * just ignore them, but return -ENOMEM at the end.
+ */
+ vma = find_vma(current->mm, start);
+ unmapped_error = 0;
+ for (;;) {
+ /* Still start < end. */
+ error = -ENOMEM;
+ if (!vma)
+ goto out;
+ /* Here start < vma->vm_end. */
+ if (start < vma->vm_start) {
+ unmapped_error = -ENOMEM;
+ start = vma->vm_start;
+ }
+ /* Here vma->vm_start <= start < vma->vm_end. */
+ if (end <= vma->vm_end) {
+ if (start < end) {
+ error = msync_interval(vma, start, end, flags);
+ if (error)
+ goto out;
+ }
+ error = unmapped_error;
+ goto out;
+ }
+ /* Here vma->vm_start <= start < vma->vm_end < end. */
+ error = msync_interval(vma, start, vma->vm_end, flags);
+ if (error)
+ goto out;
+ start = vma->vm_end;
+ vma = vma->vm_next;
+ }
+out:
+ up_read(&current->mm->mmap_sem);
+ return error;
+#else
+ return(-ENOSYS);
+#endif /* NO_MM */
+}
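+
+/*
+ * Illustrative user-space sketch (not from this source): after updating
+ * a MAP_SHARED file mapping, msync(2) as implemented above writes the
+ * dirty pages back; MS_SYNC also waits for the data, MS_ASYNC merely
+ * starts the writeout:
+ *
+ *	memcpy(addr, update, len);
+ *	if (msync(addr, len, MS_SYNC) < 0)
+ *		perror("msync");
+ *
+ * addr (page aligned, as the check above requires), update and len are
+ * assumptions for the example.
+ */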
+
+#ifndef NO_MM
+
+static inline void setup_read_behavior(struct vm_area_struct * vma,
+ int behavior)
+{
+ VM_ClearReadHint(vma);
+ switch(behavior) {
+ case MADV_SEQUENTIAL:
+ vma->vm_flags |= VM_SEQ_READ;
+ break;
+ case MADV_RANDOM:
+ vma->vm_flags |= VM_RAND_READ;
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+static long madvise_fixup_start(struct vm_area_struct * vma,
+ unsigned long end, int behavior)
+{
+ struct vm_area_struct * n;
+ struct mm_struct * mm = vma->vm_mm;
+
+ n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!n)
+ return -EAGAIN;
+ *n = *vma;
+ n->vm_end = end;
+ setup_read_behavior(n, behavior);
+ n->vm_raend = 0;
+ if (n->vm_file)
+ get_file(n->vm_file);
+ if (n->vm_ops && n->vm_ops->open)
+ n->vm_ops->open(n);
+ vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
+ lock_vma_mappings(vma);
+ spin_lock(&mm->page_table_lock);
+ vma->vm_start = end;
+ __insert_vm_struct(mm, n);
+ spin_unlock(&mm->page_table_lock);
+ unlock_vma_mappings(vma);
+ return 0;
+}
+
+static long madvise_fixup_end(struct vm_area_struct * vma,
+ unsigned long start, int behavior)
+{
+ struct vm_area_struct * n;
+ struct mm_struct * mm = vma->vm_mm;
+
+ n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!n)
+ return -EAGAIN;
+ *n = *vma;
+ n->vm_start = start;
+ n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
+ setup_read_behavior(n, behavior);
+ n->vm_raend = 0;
+ if (n->vm_file)
+ get_file(n->vm_file);
+ if (n->vm_ops && n->vm_ops->open)
+ n->vm_ops->open(n);
+ lock_vma_mappings(vma);
+ spin_lock(&mm->page_table_lock);
+ vma->vm_end = start;
+ __insert_vm_struct(mm, n);
+ spin_unlock(&mm->page_table_lock);
+ unlock_vma_mappings(vma);
+ return 0;
+}
+
+static long madvise_fixup_middle(struct vm_area_struct * vma,
+ unsigned long start, unsigned long end, int behavior)
+{
+ struct vm_area_struct * left, * right;
+ struct mm_struct * mm = vma->vm_mm;
+
+ left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!left)
+ return -EAGAIN;
+ right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!right) {
+ kmem_cache_free(vm_area_cachep, left);
+ return -EAGAIN;
+ }
+ *left = *vma;
+ *right = *vma;
+ left->vm_end = start;
+ right->vm_start = end;
+ right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
+ left->vm_raend = 0;
+ right->vm_raend = 0;
+ if (vma->vm_file)
+ atomic_add(2, &vma->vm_file->f_count);
+
+ if (vma->vm_ops && vma->vm_ops->open) {
+ vma->vm_ops->open(left);
+ vma->vm_ops->open(right);
+ }
+ vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
+ vma->vm_raend = 0;
+ lock_vma_mappings(vma);
+ spin_lock(&mm->page_table_lock);
+ vma->vm_start = start;
+ vma->vm_end = end;
+ setup_read_behavior(vma, behavior);
+ __insert_vm_struct(mm, left);
+ __insert_vm_struct(mm, right);
+ spin_unlock(&mm->page_table_lock);
+ unlock_vma_mappings(vma);
+ return 0;
+}
+
+/*
+ * We can potentially split a vm area into separate
+ * areas, each area with its own behavior.
+ */
+static long madvise_behavior(struct vm_area_struct * vma,
+ unsigned long start, unsigned long end, int behavior)
+{
+ int error = 0;
+
+ /* This caps the number of vma's this process can own */
+ if (vma->vm_mm->map_count > max_map_count)
+ return -ENOMEM;
+
+ if (start == vma->vm_start) {
+ if (end == vma->vm_end) {
+ setup_read_behavior(vma, behavior);
+ vma->vm_raend = 0;
+ } else
+ error = madvise_fixup_start(vma, end, behavior);
+ } else {
+ if (end == vma->vm_end)
+ error = madvise_fixup_end(vma, start, behavior);
+ else
+ error = madvise_fixup_middle(vma, start, end, behavior);
+ }
+
+ return error;
+}
+
+/*
+ * Schedule all required I/O operations, then run the disk queue
+ * to make sure they are started. Do not wait for completion.
+ */
+static long madvise_willneed(struct vm_area_struct * vma,
+ unsigned long start, unsigned long end)
+{
+ long error = -EBADF;
+ struct file * file;
+ unsigned long size, rlim_rss;
+
+ /* Doesn't work if there's no mapped file. */
+ if (!vma->vm_file)
+ return error;
+ file = vma->vm_file;
+ size = (file->f_dentry->d_inode->i_size + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
+
+ start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ if (end > vma->vm_end)
+ end = vma->vm_end;
+ end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+
+ /* Make sure this doesn't exceed the process's max rss. */
+ error = -EIO;
+ rlim_rss = current->rlim ? current->rlim[RLIMIT_RSS].rlim_cur :
+ LONG_MAX; /* default: see resource.h */
+ if ((vma->vm_mm->rss + (end - start)) > rlim_rss)
+ return error;
+
+ /* round to cluster boundaries if this isn't a "random" area. */
+ if (!VM_RandomReadHint(vma)) {
+ start = CLUSTER_OFFSET(start);
+ end = CLUSTER_OFFSET(end + CLUSTER_PAGES - 1);
+
+ while ((start < end) && (start < size)) {
+ error = read_cluster_nonblocking(file, start, size);
+ start += CLUSTER_PAGES;
+ if (error < 0)
+ break;
+ }
+ } else {
+ while ((start < end) && (start < size)) {
+ error = page_cache_read(file, start);
+ start++;
+ if (error < 0)
+ break;
+ }
+ }
+
+ /* Don't wait for someone else to push these requests. */
+ run_task_queue(&tq_disk);
+
+ return error;
+}
+
+/*
+ * Application no longer needs these pages. If the pages are dirty,
+ * it's OK to just throw them away. The app will be more careful about
+ * data it wants to keep. Be sure to free swap resources too. The
+ * zap_page_range call sets things up for refill_inactive to actually free
+ * these pages later if no one else has touched them in the meantime,
+ * although we could add these pages to a global reuse list for
+ * refill_inactive to pick up before reclaiming other pages.
+ *
+ * NB: This interface discards data rather than pushes it out to swap,
+ * as some implementations do. This has performance implications for
+ * applications like large transactional databases which want to discard
+ * pages in anonymous maps after committing to backing store the data
+ * that was kept in them. There is no reason to write this data out to
+ * the swap area if the application is discarding it.
+ *
+ * An interface that causes the system to free clean pages and flush
+ * dirty pages is already available as msync(MS_INVALIDATE).
+ */
+static long madvise_dontneed(struct vm_area_struct * vma,
+ unsigned long start, unsigned long end)
+{
+ if (vma->vm_flags & VM_LOCKED)
+ return -EINVAL;
+
+ zap_page_range(vma->vm_mm, start, end - start);
+ return 0;
+}
+
+static long madvise_vma(struct vm_area_struct * vma, unsigned long start,
+ unsigned long end, int behavior)
+{
+ long error = -EBADF;
+
+ switch (behavior) {
+ case MADV_NORMAL:
+ case MADV_SEQUENTIAL:
+ case MADV_RANDOM:
+ error = madvise_behavior(vma, start, end, behavior);
+ break;
+
+ case MADV_WILLNEED:
+ error = madvise_willneed(vma, start, end);
+ break;
+
+ case MADV_DONTNEED:
+ error = madvise_dontneed(vma, start, end);
+ break;
+
+ default:
+ error = -EINVAL;
+ break;
+ }
+
+ return error;
+}
+
+#endif /* NO_MM */
+
+/*
+ * The madvise(2) system call.
+ *
+ * Applications can use madvise() to advise the kernel how it should
+ * handle paging I/O in this VM area. The idea is to help the kernel
+ * use appropriate read-ahead and caching techniques. The information
+ * provided is advisory only, and can be safely disregarded by the
+ * kernel without affecting the correct operation of the application.
+ *
+ * behavior values:
+ * MADV_NORMAL - the default behavior is to read clusters. This
+ * results in some read-ahead and read-behind.
+ * MADV_RANDOM - the system should read the minimum amount of data
+ * on any access, since it is unlikely that the appli-
+ * cation will need more than what it asks for.
+ * MADV_SEQUENTIAL - pages in the given range will probably be accessed
+ * once, so they can be aggressively read ahead, and
+ * can be freed soon after they are accessed.
+ * MADV_WILLNEED - the application is notifying the system to read
+ * some pages ahead.
+ * MADV_DONTNEED - the application is finished with the given range,
+ * so the kernel can free resources associated with it.
+ *
+ * return values:
+ * zero - success
+ * -EINVAL - start + len < 0, start is not page-aligned,
+ * "behavior" is not a valid value, or application
+ * is attempting to release locked or shared pages.
+ * -ENOMEM - addresses in the specified range are not currently
+ * mapped, or are outside the AS of the process.
+ * -EIO - an I/O error occurred while paging in data.
+ * -EBADF - map exists, but area maps something that isn't a file.
+ * -EAGAIN - a kernel resource was temporarily unavailable.
+ */
+asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior)
+{
+#ifndef NO_MM
+ unsigned long end;
+ struct vm_area_struct * vma;
+ int unmapped_error = 0;
+ int error = -EINVAL;
+
+ down_write(&current->mm->mmap_sem);
+
+ if (start & ~PAGE_MASK)
+ goto out;
+ len = (len + ~PAGE_MASK) & PAGE_MASK;
+ end = start + len;
+ if (end < start)
+ goto out;
+
+ error = 0;
+ if (end == start)
+ goto out;
+
+ /*
+ * If the interval [start,end) covers some unmapped address
+ * ranges, just ignore them, but return -ENOMEM at the end.
+ */
+ vma = find_vma(current->mm, start);
+ for (;;) {
+ /* Still start < end. */
+ error = -ENOMEM;
+ if (!vma)
+ goto out;
+
+ /* Here start < vma->vm_end. */
+ if (start < vma->vm_start) {
+ unmapped_error = -ENOMEM;
+ start = vma->vm_start;
+ }
+
+ /* Here vma->vm_start <= start < vma->vm_end. */
+ if (end <= vma->vm_end) {
+ if (start < end) {
+ error = madvise_vma(vma, start, end,
+ behavior);
+ if (error)
+ goto out;
+ }
+ error = unmapped_error;
+ goto out;
+ }
+
+ /* Here vma->vm_start <= start < vma->vm_end < end. */
+ error = madvise_vma(vma, start, vma->vm_end, behavior);
+ if (error)
+ goto out;
+ start = vma->vm_end;
+ vma = vma->vm_next;
+ }
+
+out:
+ up_write(&current->mm->mmap_sem);
+ return error;
+#else /* NO_MM */
+ return(-ENOSYS);
+#endif /* NO_MM */
+}
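+
+/*
+ * Illustrative user-space sketch (not from this source), exercising the
+ * behaviors documented above: advise a sequential scan over an existing
+ * mapping, then tell the kernel the pages are no longer needed.
+ *
+ *	madvise(addr, len, MADV_SEQUENTIAL);
+ *	scan_mapping(addr, len);		(hypothetical helper)
+ *	madvise(addr, len, MADV_DONTNEED);
+ *
+ * addr and len describe an existing mapping and are assumptions for the
+ * example; each call returns 0 on success or -1 with errno set to one of
+ * the values listed above.
+ */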
+
+#ifndef NO_MM
+
+/*
+ * Later we can get more picky about what "in core" means precisely.
+ * For now, simply check to see if the page is in the page cache,
+ * and is up to date; i.e. that no page-in operation would be required
+ * at this time if an application were to map and access this page.
+ */
+static unsigned char mincore_page(struct vm_area_struct * vma,
+ unsigned long pgoff)
+{
+ unsigned char present = 0;
+ struct address_space * as = vma->vm_file->f_dentry->d_inode->i_mapping;
+ struct page * page, ** hash = page_hash(as, pgoff);
+
+ spin_lock(&pagecache_lock);
+ page = __find_page_nolock(as, pgoff, *hash);
+ if ((page) && (Page_Uptodate(page)))
+ present = 1;
+ spin_unlock(&pagecache_lock);
+
+ return present;
+}
+
+static long mincore_vma(struct vm_area_struct * vma,
+ unsigned long start, unsigned long end, unsigned char * vec)
+{
+ long error, i, remaining;
+ unsigned char * tmp;
+
+ error = -ENOMEM;
+ if (!vma->vm_file)
+ return error;
+
+ start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ if (end > vma->vm_end)
+ end = vma->vm_end;
+ end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+
+ error = -EAGAIN;
+ tmp = (unsigned char *) __get_free_page(GFP_KERNEL);
+ if (!tmp)
+ return error;
+
+	/* (end - start) is # of pages, and also # of bytes in "vec" */
+	remaining = (end - start);
+
+ error = 0;
+ for (i = 0; remaining > 0; remaining -= PAGE_SIZE, i++) {
+ int j = 0;
+ long thispiece = (remaining < PAGE_SIZE) ?
+ remaining : PAGE_SIZE;
+
+ while (j < thispiece)
+ tmp[j++] = mincore_page(vma, start++);
+
+ if (copy_to_user(vec + PAGE_SIZE * i, tmp, thispiece)) {
+ error = -EFAULT;
+ break;
+ }
+ }
+
+ free_page((unsigned long) tmp);
+ return error;
+}
+
+#endif /* NO_MM */
+
+/*
+ * The mincore(2) system call.
+ *
+ * mincore() returns the memory residency status of the pages in the
+ * current process's address space specified by [addr, addr + len).
+ * The status is returned in a vector of bytes. The least significant
+ * bit of each byte is 1 if the referenced page is in memory, otherwise
+ * it is zero.
+ *
+ * Because the status of a page can change after mincore() checks it
+ * but before it returns to the application, the returned vector may
+ * contain stale information. Only locked pages are guaranteed to
+ * remain in memory.
+ *
+ * return values:
+ * zero - success
+ * -EFAULT - vec points to an illegal address
+ * -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE,
+ * or len has a nonpositive value
+ * -ENOMEM - Addresses in the range [addr, addr + len] are
+ * invalid for the address space of this process, or
+ * specify one or more pages which are not currently
+ * mapped
+ * -EAGAIN - A kernel resource was temporarily unavailable.
+ */
+asmlinkage long sys_mincore(unsigned long start, size_t len,
+ unsigned char * vec)
+{
+#ifndef NO_MM
+ int index = 0;
+ unsigned long end;
+ struct vm_area_struct * vma;
+ int unmapped_error = 0;
+ long error = -EINVAL;
+
+ down_read(&current->mm->mmap_sem);
+
+ if (start & ~PAGE_CACHE_MASK)
+ goto out;
+ len = (len + ~PAGE_CACHE_MASK) & PAGE_CACHE_MASK;
+ end = start + len;
+ if (end < start)
+ goto out;
+
+ error = 0;
+ if (end == start)
+ goto out;
+
+ /*
+ * If the interval [start,end) covers some unmapped address
+ * ranges, just ignore them, but return -ENOMEM at the end.
+ */
+ vma = find_vma(current->mm, start);
+ for (;;) {
+ /* Still start < end. */
+ error = -ENOMEM;
+ if (!vma)
+ goto out;
+
+ /* Here start < vma->vm_end. */
+ if (start < vma->vm_start) {
+ unmapped_error = -ENOMEM;
+ start = vma->vm_start;
+ }
+
+ /* Here vma->vm_start <= start < vma->vm_end. */
+ if (end <= vma->vm_end) {
+ if (start < end) {
+ error = mincore_vma(vma, start, end,
+ &vec[index]);
+ if (error)
+ goto out;
+ }
+ error = unmapped_error;
+ goto out;
+ }
+
+ /* Here vma->vm_start <= start < vma->vm_end < end. */
+ error = mincore_vma(vma, start, vma->vm_end, &vec[index]);
+ if (error)
+ goto out;
+ index += (vma->vm_end - start) >> PAGE_CACHE_SHIFT;
+ start = vma->vm_end;
+ vma = vma->vm_next;
+ }
+
+out:
+ up_read(&current->mm->mmap_sem);
+ return error;
+#else /* NO_MM */
+ return(-ENOSYS);
+#endif /* NO_MM */
+}
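+
+/*
+ * Illustrative user-space sketch (not from this source): the vector
+ * passed to mincore(2) needs one byte per page of the probed range, and
+ * only the least significant bit of each byte is meaningful, as
+ * documented above.
+ *
+ *	long psz = sysconf(_SC_PAGESIZE);
+ *	size_t pages = (len + psz - 1) / psz;
+ *	unsigned char *vec = malloc(pages);
+ *	size_t i, resident = 0;
+ *
+ *	if (vec && mincore(addr, len, vec) == 0)
+ *		for (i = 0; i < pages; i++)
+ *			resident += vec[i] & 1;
+ *
+ * addr and len describe an existing page-aligned mapping and are
+ * assumptions for the example.
+ */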
+
+static inline
+struct page *__read_cache_page(struct address_space *mapping,
+ unsigned long index,
+ int (*filler)(void *,struct page*),
+ void *data)
+{
+ struct page **hash = page_hash(mapping, index);
+ struct page *page, *cached_page = NULL;
+ int err;
+repeat:
+ page = __find_get_page(mapping, index, hash);
+ if (!page) {
+ if (!cached_page) {
+ cached_page = page_cache_alloc(mapping);
+ if (!cached_page)
+ return ERR_PTR(-ENOMEM);
+ }
+ page = cached_page;
+ if (add_to_page_cache_unique(page, mapping, index, hash))
+ goto repeat;
+ cached_page = NULL;
+ err = filler(data, page);
+ if (err < 0) {
+ page_cache_release(page);
+ page = ERR_PTR(err);
+ }
+ }
+ if (cached_page)
+ page_cache_release(cached_page);
+ return page;
+}
+
+/*
+ * Read into the page cache. If a page already exists,
+ * and Page_Uptodate() is not set, try to fill the page.
+ */
+struct page *read_cache_page(struct address_space *mapping,
+ unsigned long index,
+ int (*filler)(void *,struct page*),
+ void *data)
+{
+ struct page *page;
+ int err;
+
+retry:
+ page = __read_cache_page(mapping, index, filler, data);
+ if (IS_ERR(page))
+ goto out;
+ mark_page_accessed(page);
+ if (Page_Uptodate(page))
+ goto out;
+
+ lock_page(page);
+ if (!page->mapping) {
+ UnlockPage(page);
+ page_cache_release(page);
+ goto retry;
+ }
+ if (Page_Uptodate(page)) {
+ UnlockPage(page);
+ goto out;
+ }
+ err = filler(data, page);
+ if (err < 0) {
+ page_cache_release(page);
+ page = ERR_PTR(err);
+ }
+ out:
+ return page;
+}
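+
+/*
+ * Illustrative in-kernel sketch (not from this source): callers usually
+ * feed the mapping's own ->readpage in as the filler, roughly:
+ *
+ *	struct page *page = read_cache_page(mapping, index,
+ *		(int (*)(void *, struct page *))mapping->a_ops->readpage,
+ *		file);
+ *	if (!IS_ERR(page)) {
+ *		wait_on_page(page);
+ *		if (Page_Uptodate(page))
+ *			consume(page);		(hypothetical consumer)
+ *		page_cache_release(page);
+ *	}
+ *
+ * mapping, index and file are assumptions for the example; the filler is
+ * started asynchronously, hence the wait_on_page() before the data is
+ * touched.
+ */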
+
+static inline struct page * __grab_cache_page(struct address_space *mapping,
+ unsigned long index, struct page **cached_page)
+{
+ struct page *page, **hash = page_hash(mapping, index);
+repeat:
+ page = __find_lock_page(mapping, index, hash);
+ if (!page) {
+ if (!*cached_page) {
+ *cached_page = page_cache_alloc(mapping);
+ if (!*cached_page)
+ return NULL;
+ }
+ page = *cached_page;
+ if (add_to_page_cache_unique(page, mapping, index, hash))
+ goto repeat;
+ *cached_page = NULL;
+ }
+ return page;
+}
+
+inline void remove_suid(struct inode *inode)
+{
+ unsigned int mode;
+
+	/* set S_ISGID if S_IXGRP is set, and always set S_ISUID */
+ mode = (inode->i_mode & S_IXGRP)*(S_ISGID/S_IXGRP) | S_ISUID;
+
+	/* were any of these set-id bits actually set? */
+ mode &= inode->i_mode;
+ if (mode && !capable(CAP_FSETID)) {
+ inode->i_mode &= ~mode;
+ mark_inode_dirty(inode);
+ }
+}
+
+/*
+ * Write to a file through the page cache.
+ *
+ * We currently put everything into the page cache prior to writing it.
+ * This is not a problem when writing full pages. With partial pages,
+ * however, we first have to read the data into the cache, then
+ * dirty the page, and finally schedule it for writing. Alternatively, we
+ * could write-through just the portion of data that would go into that
+ * page, but that would kill performance for applications that write data
+ * line by line, and it's prone to race conditions.
+ *
+ * Note that this routine doesn't try to keep track of dirty pages. Each
+ * file system has to do this all by itself, unfortunately.
+ * okir@monad.swb.de
+ */
+ssize_t
+generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
+{
+ struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
+ struct inode *inode = mapping->host;
+ unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+ loff_t pos;
+ struct page *page, *cached_page;
+ ssize_t written;
+ long status = 0;
+ int err;
+ unsigned bytes;
+
+ if ((ssize_t) count < 0)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ cached_page = NULL;
+
+ down(&inode->i_sem);
+
+ pos = *ppos;
+ err = -EINVAL;
+ if (pos < 0)
+ goto out;
+
+ err = file->f_error;
+ if (err) {
+ file->f_error = 0;
+ goto out;
+ }
+
+ written = 0;
+
+ /* FIXME: this is for backwards compatibility with 2.4 */
+ if (!S_ISBLK(inode->i_mode) && file->f_flags & O_APPEND)
+ pos = inode->i_size;
+
+ /*
+ * Check whether we've reached the file size limit.
+ */
+ err = -EFBIG;
+
+ if (!S_ISBLK(inode->i_mode) && limit != RLIM_INFINITY) {
+ if (pos >= limit) {
+ send_sig(SIGXFSZ, current, 0);
+ goto out;
+ }
+ if (pos > 0xFFFFFFFFULL || count > limit - (u32)pos) {
+ /* send_sig(SIGXFSZ, current, 0); */
+ count = limit - (u32)pos;
+ }
+ }
+
+ /*
+ * LFS rule
+ */
+ if ( pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) {
+ if (pos >= MAX_NON_LFS) {
+ send_sig(SIGXFSZ, current, 0);
+ goto out;
+ }
+ if (count > MAX_NON_LFS - (u32)pos) {
+ /* send_sig(SIGXFSZ, current, 0); */
+ count = MAX_NON_LFS - (u32)pos;
+ }
+ }
+
+ /*
+	 * Are we about to exceed the fs block limit?
+	 *
+	 * If we have written data it becomes a short write.
+	 * If we have exceeded without writing data we send
+	 * a signal and give them an EFBIG.
+	 *
+	 * Linus' frestrict idea will clean these up nicely..
+ */
+
+ if (!S_ISBLK(inode->i_mode)) {
+ if (pos >= inode->i_sb->s_maxbytes)
+ {
+ if (count || pos > inode->i_sb->s_maxbytes) {
+ send_sig(SIGXFSZ, current, 0);
+ err = -EFBIG;
+ goto out;
+ }
+ /* zero-length writes at ->s_maxbytes are OK */
+ }
+
+ if (pos + count > inode->i_sb->s_maxbytes)
+ count = inode->i_sb->s_maxbytes - pos;
+ } else {
+ if (is_read_only(inode->i_rdev)) {
+ err = -EPERM;
+ goto out;
+ }
+ if (pos >= inode->i_size) {
+ if (count || pos > inode->i_size) {
+ err = -ENOSPC;
+ goto out;
+ }
+ }
+
+ if (pos + count > inode->i_size)
+ count = inode->i_size - pos;
+ }
+
+ err = 0;
+ if (count == 0)
+ goto out;
+
+ remove_suid(inode);
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+ mark_inode_dirty_sync(inode);
+
+ if (file->f_flags & O_DIRECT)
+ goto o_direct;
+
+ do {
+ unsigned long index, offset;
+ long page_fault;
+ char *kaddr;
+
+ /*
+ * Try to find the page in the cache. If it isn't there,
+ * allocate a free page.
+ */
+ offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
+ index = pos >> PAGE_CACHE_SHIFT;
+ bytes = PAGE_CACHE_SIZE - offset;
+ if (bytes > count)
+ bytes = count;
+
+ /*
+ * Bring in the user page that we will copy from _first_.
+ * Otherwise there's a nasty deadlock on copying from the
+ * same page as we're writing to, without it being marked
+ * up-to-date.
+ */
+ { volatile unsigned char dummy;
+ __get_user(dummy, buf);
+ __get_user(dummy, buf+bytes-1);
+ }
+
+ status = -ENOMEM; /* we'll assign it later anyway */
+ page = __grab_cache_page(mapping, index, &cached_page);
+ if (!page)
+ break;
+
+ /* We have exclusive IO access to the page.. */
+ if (!PageLocked(page)) {
+ PAGE_BUG(page);
+ }
+
+ kaddr = kmap(page);
+ status = mapping->a_ops->prepare_write(file, page, offset, offset+bytes);
+ if (status)
+ goto sync_failure;
+ page_fault = __copy_from_user(kaddr+offset, buf, bytes);
+ flush_dcache_page(page);
+ status = mapping->a_ops->commit_write(file, page, offset, offset+bytes);
+ if (page_fault)
+ goto fail_write;
+ if (!status)
+ status = bytes;
+
+ if (status >= 0) {
+ written += status;
+ count -= status;
+ pos += status;
+ buf += status;
+ }
+unlock:
+ kunmap(page);
+ /* Mark it unlocked again and drop the page.. */
+ SetPageReferenced(page);
+ UnlockPage(page);
+ page_cache_release(page);
+
+ if (status < 0)
+ break;
+ } while (count);
+done:
+ *ppos = pos;
+
+ if (cached_page)
+ page_cache_release(cached_page);
+
+ /* For now, when the user asks for O_SYNC, we'll actually
+ * provide O_DSYNC. */
+ if (status >= 0) {
+ if ((file->f_flags & O_SYNC) || IS_SYNC(inode))
+ status = generic_osync_inode(inode, OSYNC_METADATA|OSYNC_DATA);
+ }
+
+out_status:
+ err = written ? written : status;
+out:
+
+ up(&inode->i_sem);
+ return err;
+fail_write:
+ status = -EFAULT;
+ goto unlock;
+
+sync_failure:
+ /*
+ * If blocksize < pagesize, prepare_write() may have instantiated a
+ * few blocks outside i_size. Trim these off again.
+ */
+ kunmap(page);
+ UnlockPage(page);
+ page_cache_release(page);
+ if (pos + bytes > inode->i_size)
+ vmtruncate(inode, inode->i_size);
+ goto done;
+
+o_direct:
+ written = generic_file_direct_IO(WRITE, file, (char *) buf, count, pos);
+ if (written > 0) {
+ loff_t end = pos + written;
+ if (end > inode->i_size && !S_ISBLK(inode->i_mode)) {
+ inode->i_size = end;
+ mark_inode_dirty(inode);
+ }
+ *ppos = end;
+ invalidate_inode_pages2(mapping);
+ }
+ /*
+ * Sync the fs metadata but not the minor inode changes and
+ * of course not the data as we did direct DMA for the IO.
+ */
+ if (written >= 0 && file->f_flags & O_SYNC)
+ status = generic_osync_inode(inode, OSYNC_METADATA);
+ goto out_status;
+}
+
+void __init page_cache_init(unsigned long mempages)
+{
+ unsigned long htable_size, order;
+
+ htable_size = mempages;
+ htable_size *= sizeof(struct page *);
+ for(order = 0; (PAGE_SIZE << order) < htable_size; order++)
+ ;
+
+ do {
+ unsigned long tmp = (PAGE_SIZE << order) / sizeof(struct page *);
+
+ page_hash_bits = 0;
+ while((tmp >>= 1UL) != 0UL)
+ page_hash_bits++;
+
+ page_hash_table = (struct page **)
+ __get_free_pages(GFP_ATOMIC, order);
+ } while(page_hash_table == NULL && --order > 0);
+
+ printk("Page-cache hash table entries: %d (order: %lu, %lu bytes)\n",
+ (1 << page_hash_bits), order, (PAGE_SIZE << order));
+ if (!page_hash_table)
+ panic("Failed to allocate page hash table\n");
+ memset((void *)page_hash_table, 0, PAGE_HASH_SIZE * sizeof(struct page *));
+}
diff --git a/uClinux-2.4.20-uc1/mmnommu/memory.c b/uClinux-2.4.20-uc1/mmnommu/memory.c
new file mode 100644
index 0000000..b848a79
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/memory.c
@@ -0,0 +1,181 @@
+/*
+ * linux/mmnommu/memory.c
+ *
+ * Copyright (c) 2000-2002 SnapGear Inc., David McCullough <davidm@snapgear.com>
+ * Copyright (c) 2000 Lineo,Inc. David McCullough <davidm@lineo.com>
+ */
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/iobuf.h>
+#include <linux/pagemap.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+
+void *high_memory;
+mem_map_t * mem_map = NULL;
+unsigned long max_mapnr;
+unsigned long num_physpages;
+unsigned long num_mappedpages;
+unsigned long askedalloc, realalloc;
+
+/*
+ * Force in an entire range of pages from the current process's user VA,
+ * and pin them in physical memory.
+ */
+
+int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
+{
+ return(0);
+}
+
+/*
+ * Mark all of the pages in a kiobuf as dirty
+ *
+ * We need to be able to deal with short reads from disk: if an IO error
+ * occurs, the number of bytes read into memory may be less than the
+ * size of the kiobuf, so we have to stop marking pages dirty once the
+ * requested byte count has been reached.
+ *
+ * Must be called from process context - set_page_dirty() takes VFS locks.
+ */
+
+void mark_dirty_kiobuf(struct kiobuf *iobuf, int bytes)
+{
+ int index, offset, remaining;
+ struct page *page;
+
+ index = iobuf->offset >> PAGE_SHIFT;
+ offset = iobuf->offset & ~PAGE_MASK;
+ remaining = bytes;
+ if (remaining > iobuf->length)
+ remaining = iobuf->length;
+
+ while (remaining > 0 && index < iobuf->nr_pages) {
+ page = iobuf->maplist[index];
+
+ if (!PageReserved(page))
+ set_page_dirty(page);
+
+ remaining -= (PAGE_SIZE - offset);
+ offset = 0;
+ index++;
+ }
+}
+
+/*
+ * Unmap all of the pages referenced by a kiobuf. We release the pages,
+ * and unlock them if they were locked.
+ */
+
+void unmap_kiobuf (struct kiobuf *iobuf)
+{
+}
+
+
+/*
+ * Lock down all of the pages of a kiovec for IO.
+ *
+ * If any page is mapped twice in the kiovec, we return the error -EINVAL.
+ *
+ * The optional wait parameter causes the lock call to block until all
+ * pages can be locked if set. If wait==0, the lock operation is
+ * aborted if any locked pages are found and -EAGAIN is returned.
+ */
+
+int lock_kiovec(int nr, struct kiobuf *iovec[], int wait)
+{
+ return 0;
+}
+
+/*
+ * Unlock all of the pages of a kiovec after IO.
+ */
+
+int unlock_kiovec(int nr, struct kiobuf *iovec[])
+{
+ return 0;
+}
+
+/*
+ * Handle all mappings that got truncated by a "truncate()"
+ * system call.
+ *
+ * NOTE! We have to be ready to update the memory sharing
+ * between the file and the memory map for a potential last
+ * incomplete page. Ugly, but necessary.
+ */
+int vmtruncate(struct inode * inode, loff_t offset)
+{
+ struct address_space *mapping = inode->i_mapping;
+ unsigned long limit;
+
+ if (inode->i_size < offset)
+ goto do_expand;
+ inode->i_size = offset;
+
+ truncate_inode_pages(mapping, offset);
+ goto out_truncate;
+
+do_expand:
+ limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (limit != RLIM_INFINITY && offset > limit)
+ goto out_sig;
+ if (offset > inode->i_sb->s_maxbytes)
+ goto out;
+ inode->i_size = offset;
+
+out_truncate:
+ if (inode->i_op && inode->i_op->truncate) {
+ lock_kernel();
+ inode->i_op->truncate(inode);
+ unlock_kernel();
+ }
+ return 0;
+out_sig:
+ send_sig(SIGXFSZ, current, 0);
+out:
+ return -EFBIG;
+}
+
+
+/* Note: this is only safe if the mm semaphore is held when called. */
+int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
+{
+ return -EPERM;
+}
+
+
+/*
+ * The nommu dodgy version :-)
+ */
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ int i;
+ static struct vm_area_struct dummy_vma;
+
+ for (i = 0; i < len; i++) {
+ if (pages) {
+ pages[i] = virt_to_page(start);
+ if (pages[i])
+ page_cache_get(pages[i]);
+ }
+ if (vmas)
+ vmas[i] = &dummy_vma;
+ start += PAGE_SIZE;
+ }
+ return(i);
+}
+
+
+struct page * vmalloc_to_page(void * vmalloc_addr)
+{
+ return(virt_to_page(vmalloc_addr));
+}
diff --git a/uClinux-2.4.20-uc1/mmnommu/mlock.c b/uClinux-2.4.20-uc1/mmnommu/mlock.c
new file mode 100644
index 0000000..71aaae4
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/mlock.c
@@ -0,0 +1,35 @@
+/*
+ * linux/mm/mlock.c
+ *
+ * Copyright (c) 2001 Lineo, Inc. David McCullough <davidm@lineo.com>
+ * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> ref uClinux 2.0
+ * (C) Copyright 1995 Linus Torvalds
+ */
+#include <linux/slab.h>
+#include <linux/shm.h>
+#include <linux/mman.h>
+#include <linux/smp_lock.h>
+#include <linux/pagemap.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+asmlinkage long sys_mlock(unsigned long start, size_t len)
+{
+ return -ENOSYS;
+}
+
+asmlinkage long sys_munlock(unsigned long start, size_t len)
+{
+ return -ENOSYS;
+}
+
+asmlinkage long sys_mlockall(int flags)
+{
+ return -ENOSYS;
+}
+
+asmlinkage long sys_munlockall(void)
+{
+ return -ENOSYS;
+}
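+
+/*
+ * Illustrative note (not from this source): all four calls above fail
+ * with -ENOSYS on this no-MMU build, where there is no paging to lock
+ * against in the first place.  Portable user code should treat that
+ * failure as advisory, e.g.:
+ *
+ *	if (mlock(buf, len) != 0 && errno != ENOSYS)
+ *		perror("mlock");
+ *
+ * buf and len are assumptions for the example.
+ */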
diff --git a/uClinux-2.4.20-uc1/mmnommu/mmap.c b/uClinux-2.4.20-uc1/mmnommu/mmap.c
new file mode 100644
index 0000000..72fb614
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/mmap.c
@@ -0,0 +1,1574 @@
+/*
+ * linux/mm/mmap.c
+ *
+ * Written by obz.
+ *
+ * NO_MM
+ * Copyright (c) 2001 Lineo, Inc. David McCullough <davidm@lineo.com>
+ * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> ref uClinux 2.0
+ */
+#include <linux/slab.h>
+#include <linux/shm.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/swapctl.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/personality.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+#ifndef NO_MM
+/*
+ * WARNING: the debugging will use recursive algorithms so never enable this
+ * unless you know what you are doing.
+ */
+#undef DEBUG_MM_RB
+
+/* description of effects of mapping type and prot in current implementation.
+ * this is due to the limited x86 page protection hardware. The expected
+ * behavior is in parens:
+ *
+ * map_type prot
+ * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
+ * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (yes) yes w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (copy) copy w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ */
+pgprot_t protection_map[16] = {
+ __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+ __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+};
+
+int sysctl_overcommit_memory;
+int max_map_count = DEFAULT_MAX_MAP_COUNT;
+
+/* Check that a process has enough memory to allocate a
+ * new virtual mapping.
+ */
+int vm_enough_memory(long pages)
+{
+ /* Stupid algorithm to decide if we have enough memory: while
+ * simple, it hopefully works in most obvious cases.. Easy to
+ * fool it, but this should catch most mistakes.
+ */
+ /* 23/11/98 NJC: Somewhat less stupid version of algorithm,
+ * which tries to do "TheRightThing". Instead of using half of
+ * (buffers+cache), use the minimum values. Allow an extra 2%
+ * of num_physpages for safety margin.
+ */
+
+ unsigned long free;
+
+ /* Sometimes we want to use more memory than we have. */
+ if (sysctl_overcommit_memory)
+ return 1;
+
+ /* The page cache contains buffer pages these days.. */
+ free = atomic_read(&page_cache_size);
+ free += nr_free_pages();
+ free += nr_swap_pages;
+
+ /*
+ * This double-counts: the nrpages are both in the page-cache
+ * and in the swapper space. At the same time, this compensates
+ * for the swap-space over-allocation (ie "nr_swap_pages" being
+	 * too small).
+ */
+ free += swapper_space.nrpages;
+
+ /*
+ * The code below doesn't account for free space in the inode
+ * and dentry slab cache, slab cache fragmentation, inodes and
+ * dentries which will become freeable under VM load, etc.
+	 * Let's just hope all these (complex) factors balance out...
+ */
+ free += (dentry_stat.nr_unused * sizeof(struct dentry)) >> PAGE_SHIFT;
+ free += (inodes_stat.nr_unused * sizeof(struct inode)) >> PAGE_SHIFT;
+
+ return free > pages;
+}
+
+/* Remove one vm structure from the inode's i_mapping address space. */
+static inline void __remove_shared_vm_struct(struct vm_area_struct *vma)
+{
+ struct file * file = vma->vm_file;
+
+ if (file) {
+ struct inode *inode = file->f_dentry->d_inode;
+ if (vma->vm_flags & VM_DENYWRITE)
+ atomic_inc(&inode->i_writecount);
+ if(vma->vm_next_share)
+ vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
+ *vma->vm_pprev_share = vma->vm_next_share;
+ }
+}
+
+static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
+{
+ lock_vma_mappings(vma);
+ __remove_shared_vm_struct(vma);
+ unlock_vma_mappings(vma);
+}
+
+void lock_vma_mappings(struct vm_area_struct *vma)
+{
+ struct address_space *mapping;
+
+ mapping = NULL;
+ if (vma->vm_file)
+ mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+ if (mapping)
+ spin_lock(&mapping->i_shared_lock);
+}
+
+void unlock_vma_mappings(struct vm_area_struct *vma)
+{
+ struct address_space *mapping;
+
+ mapping = NULL;
+ if (vma->vm_file)
+ mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+ if (mapping)
+ spin_unlock(&mapping->i_shared_lock);
+}
+
+#endif /* NO_MM */
+
+/*
+ * sys_brk() for the most part doesn't need the global kernel
+ * lock, except when an application is doing something nasty
+ * like trying to un-brk an area that has already been mapped
+ * to a regular file. In this case, the unmapping will need
+ * to invoke file system routines that need the global lock.
+ */
+asmlinkage unsigned long sys_brk(unsigned long brk)
+{
+#ifndef NO_MM
+ unsigned long rlim, retval;
+ unsigned long newbrk, oldbrk;
+ struct mm_struct *mm = current->mm;
+
+ down_write(&mm->mmap_sem);
+
+ if (brk < mm->end_code)
+ goto out;
+ newbrk = PAGE_ALIGN(brk);
+ oldbrk = PAGE_ALIGN(mm->brk);
+ if (oldbrk == newbrk)
+ goto set_brk;
+
+ /* Always allow shrinking brk. */
+ if (brk <= mm->brk) {
+ if (!do_munmap(mm, newbrk, oldbrk-newbrk))
+ goto set_brk;
+ goto out;
+ }
+
+ /* Check against rlimit.. */
+ rlim = current->rlim[RLIMIT_DATA].rlim_cur;
+ if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+ goto out;
+
+ /* Check against existing mmap mappings. */
+ if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+ goto out;
+
+ /* Check if we have enough memory.. */
+ if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
+ goto out;
+
+ /* Ok, looks good - let it rip. */
+ if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
+ goto out;
+set_brk:
+ mm->brk = brk;
+out:
+ retval = mm->brk;
+ up_write(&mm->mmap_sem);
+ return retval;
+#else
+ struct mm_struct *mm = current->mm;
+
+ if (brk < mm->end_code || brk < mm->start_brk || brk > mm->end_brk)
+ return mm->brk;
+
+ if (mm->brk == brk)
+ return mm->brk;
+
+ /*
+ * Always allow shrinking brk
+ */
+ if (brk <= mm->brk) {
+ mm->brk = brk;
+ return brk;
+ }
+
+ /*
+ * Ok, looks good - let it rip.
+ */
+ return mm->brk = brk;
+#endif
+}
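+
+/*
+ * Illustrative user-space sketch (not from this source): on the no-MMU
+ * path above the break can only move within the region set aside between
+ * start_brk and end_brk at exec time; an out-of-range request just
+ * returns the old break, which the C library's sbrk() wrapper reports as
+ * a failure:
+ *
+ *	void *p = sbrk(4096);
+ *	if (p == (void *)-1)
+ *		out_of_heap();			(hypothetical handler)
+ *
+ * The 4096-byte increment is an arbitrary example value.
+ */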
+
+
+/* Combine the mmap "prot" and "flags" argument into one "vm_flags" used
+ * internally. Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
+ * into "VM_xxx".
+ */
+static inline unsigned long calc_vm_flags(unsigned long prot, unsigned long flags)
+{
+#define _trans(x,bit1,bit2) \
+((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
+
+ unsigned long prot_bits, flag_bits;
+ prot_bits =
+ _trans(prot, PROT_READ, VM_READ) |
+ _trans(prot, PROT_WRITE, VM_WRITE) |
+ _trans(prot, PROT_EXEC, VM_EXEC);
+ flag_bits =
+ _trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
+ _trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
+ _trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
+ return prot_bits | flag_bits;
+#undef _trans
+}
+
+#ifndef NO_MM
+
+#ifdef DEBUG_MM_RB
+static int browse_rb(rb_node_t * rb_node) {
+ int i = 0;
+ if (rb_node) {
+ i++;
+ i += browse_rb(rb_node->rb_left);
+ i += browse_rb(rb_node->rb_right);
+ }
+ return i;
+}
+
+static void validate_mm(struct mm_struct * mm) {
+ int bug = 0;
+ int i = 0;
+ struct vm_area_struct * tmp = mm->mmap;
+ while (tmp) {
+ tmp = tmp->vm_next;
+ i++;
+ }
+ if (i != mm->map_count)
+ printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
+ i = browse_rb(mm->mm_rb.rb_node);
+ if (i != mm->map_count)
+ printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
+ if (bug)
+ BUG();
+}
+#else
+#define validate_mm(mm) do { } while (0)
+#endif
+
+static struct vm_area_struct * find_vma_prepare(struct mm_struct * mm, unsigned long addr,
+ struct vm_area_struct ** pprev,
+ rb_node_t *** rb_link, rb_node_t ** rb_parent)
+{
+ struct vm_area_struct * vma;
+ rb_node_t ** __rb_link, * __rb_parent, * rb_prev;
+
+ __rb_link = &mm->mm_rb.rb_node;
+ rb_prev = __rb_parent = NULL;
+ vma = NULL;
+
+ while (*__rb_link) {
+ struct vm_area_struct *vma_tmp;
+
+ __rb_parent = *__rb_link;
+ vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
+
+ if (vma_tmp->vm_end > addr) {
+ vma = vma_tmp;
+ if (vma_tmp->vm_start <= addr)
+ return vma;
+ __rb_link = &__rb_parent->rb_left;
+ } else {
+ rb_prev = __rb_parent;
+ __rb_link = &__rb_parent->rb_right;
+ }
+ }
+
+ *pprev = NULL;
+ if (rb_prev)
+ *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
+ *rb_link = __rb_link;
+ *rb_parent = __rb_parent;
+ return vma;
+}
+
+static inline void __vma_link_list(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
+ rb_node_t * rb_parent)
+{
+ if (prev) {
+ vma->vm_next = prev->vm_next;
+ prev->vm_next = vma;
+ } else {
+ mm->mmap = vma;
+ if (rb_parent)
+ vma->vm_next = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
+ else
+ vma->vm_next = NULL;
+ }
+}
+
+static inline void __vma_link_rb(struct mm_struct * mm, struct vm_area_struct * vma,
+ rb_node_t ** rb_link, rb_node_t * rb_parent)
+{
+ rb_link_node(&vma->vm_rb, rb_parent, rb_link);
+ rb_insert_color(&vma->vm_rb, &mm->mm_rb);
+}
+
+static inline void __vma_link_file(struct vm_area_struct * vma)
+{
+ struct file * file;
+
+ file = vma->vm_file;
+ if (file) {
+ struct inode * inode = file->f_dentry->d_inode;
+ struct address_space *mapping = inode->i_mapping;
+ struct vm_area_struct **head;
+
+ if (vma->vm_flags & VM_DENYWRITE)
+ atomic_dec(&inode->i_writecount);
+
+ head = &mapping->i_mmap;
+ if (vma->vm_flags & VM_SHARED)
+ head = &mapping->i_mmap_shared;
+
+ /* insert vma into inode's share list */
+ if((vma->vm_next_share = *head) != NULL)
+ (*head)->vm_pprev_share = &vma->vm_next_share;
+ *head = vma;
+ vma->vm_pprev_share = head;
+ }
+}
+
+static void __vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
+ rb_node_t ** rb_link, rb_node_t * rb_parent)
+{
+ __vma_link_list(mm, vma, prev, rb_parent);
+ __vma_link_rb(mm, vma, rb_link, rb_parent);
+ __vma_link_file(vma);
+}
+
+static inline void vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
+ rb_node_t ** rb_link, rb_node_t * rb_parent)
+{
+ lock_vma_mappings(vma);
+ spin_lock(&mm->page_table_lock);
+ __vma_link(mm, vma, prev, rb_link, rb_parent);
+ spin_unlock(&mm->page_table_lock);
+ unlock_vma_mappings(vma);
+
+ mm->map_count++;
+ validate_mm(mm);
+}
+
+static int vma_merge(struct mm_struct * mm, struct vm_area_struct * prev,
+ rb_node_t * rb_parent, unsigned long addr, unsigned long end, unsigned long vm_flags)
+{
+ spinlock_t * lock = &mm->page_table_lock;
+ if (!prev) {
+ prev = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
+ goto merge_next;
+ }
+ if (prev->vm_end == addr && can_vma_merge(prev, vm_flags)) {
+ struct vm_area_struct * next;
+
+ spin_lock(lock);
+ prev->vm_end = end;
+ next = prev->vm_next;
+ if (next && prev->vm_end == next->vm_start && can_vma_merge(next, vm_flags)) {
+ prev->vm_end = next->vm_end;
+ __vma_unlink(mm, next, prev);
+ spin_unlock(lock);
+
+ mm->map_count--;
+ kmem_cache_free(vm_area_cachep, next);
+ return 1;
+ }
+ spin_unlock(lock);
+ return 1;
+ }
+
+ prev = prev->vm_next;
+ if (prev) {
+ merge_next:
+ if (!can_vma_merge(prev, vm_flags))
+ return 0;
+ if (end == prev->vm_start) {
+ spin_lock(lock);
+ prev->vm_start = addr;
+ spin_unlock(lock);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long pgoff)
+{
+ struct mm_struct * mm = current->mm;
+ struct vm_area_struct * vma, * prev;
+ unsigned int vm_flags;
+ int correct_wcount = 0;
+ int error;
+ rb_node_t ** rb_link, * rb_parent;
+
+ if (file && (!file->f_op || !file->f_op->mmap))
+ return -ENODEV;
+
+ if ((len = PAGE_ALIGN(len)) == 0)
+ return addr;
+
+ if (len > TASK_SIZE)
+ return -EINVAL;
+
+ /* offset overflow? */
+ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
+ return -EINVAL;
+
+ /* Too many mappings? */
+ if (mm->map_count > max_map_count)
+ return -ENOMEM;
+
+ /* Obtain the address to map to. we verify (or select) it and ensure
+ * that it represents a valid section of the address space.
+ */
+ addr = get_unmapped_area(file, addr, len, pgoff, flags);
+ if (addr & ~PAGE_MASK)
+ return addr;
+
+ /* Do simple checking here so the lower-level routines won't have
+ * to. we assume access permissions have been handled by the open
+ * of the memory object, so we don't do any here.
+ */
+ vm_flags = calc_vm_flags(prot,flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+
+ /* mlock MCL_FUTURE? */
+ if (vm_flags & VM_LOCKED) {
+ unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+ locked += len;
+ if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ return -EAGAIN;
+ }
+
+ if (file) {
+ switch (flags & MAP_TYPE) {
+ case MAP_SHARED:
+ if ((prot & PROT_WRITE) && !(file->f_mode & FMODE_WRITE))
+ return -EACCES;
+
+ /* Make sure we don't allow writing to an append-only file.. */
+ if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & FMODE_WRITE))
+ return -EACCES;
+
+ /* make sure there are no mandatory locks on the file. */
+ if (locks_verify_locked(file->f_dentry->d_inode))
+ return -EAGAIN;
+
+ vm_flags |= VM_SHARED | VM_MAYSHARE;
+ if (!(file->f_mode & FMODE_WRITE))
+ vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
+
+ /* fall through */
+ case MAP_PRIVATE:
+ if (!(file->f_mode & FMODE_READ))
+ return -EACCES;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ } else {
+ vm_flags |= VM_SHARED | VM_MAYSHARE;
+ switch (flags & MAP_TYPE) {
+ default:
+ return -EINVAL;
+ case MAP_PRIVATE:
+ vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
+ /* fall through */
+ case MAP_SHARED:
+ break;
+ }
+ }
+
+ /* Clear old maps */
+munmap_back:
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+ if (vma && vma->vm_start < addr + len) {
+ if (do_munmap(mm, addr, len))
+ return -ENOMEM;
+ goto munmap_back;
+ }
+
+ /* Check against address space limit. */
+ if ((mm->total_vm << PAGE_SHIFT) + len
+ > current->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+
+ /* Private writable mapping? Check memory availability.. */
+ if ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
+ !(flags & MAP_NORESERVE) &&
+ !vm_enough_memory(len >> PAGE_SHIFT))
+ return -ENOMEM;
+
+ /* Can we just expand an old anonymous mapping? */
+ if (!file && !(vm_flags & VM_SHARED) && rb_parent)
+ if (vma_merge(mm, prev, rb_parent, addr, addr + len, vm_flags))
+ goto out;
+
+ /* Determine the object being mapped and call the appropriate
+	 * specific mapper. The address has already been validated, but not
+	 * unmapped; the overlapping maps have already been removed from the list.
+ */
+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!vma)
+ return -ENOMEM;
+
+ vma->vm_mm = mm;
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+ vma->vm_flags = vm_flags;
+ vma->vm_page_prot = protection_map[vm_flags & 0x0f];
+ vma->vm_ops = NULL;
+ vma->vm_pgoff = pgoff;
+ vma->vm_file = NULL;
+ vma->vm_private_data = NULL;
+ vma->vm_raend = 0;
+
+ if (file) {
+ error = -EINVAL;
+ if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
+ goto free_vma;
+ if (vm_flags & VM_DENYWRITE) {
+ error = deny_write_access(file);
+ if (error)
+ goto free_vma;
+ correct_wcount = 1;
+ }
+ vma->vm_file = file;
+ get_file(file);
+ error = file->f_op->mmap(file, vma);
+ if (error)
+ goto unmap_and_free_vma;
+ } else if (flags & MAP_SHARED) {
+ error = shmem_zero_setup(vma);
+ if (error)
+ goto free_vma;
+ }
+
+ /* Can addr have changed??
+ *
+ * Answer: Yes, several device drivers can do it in their
+ * f_op->mmap method. -DaveM
+ */
+ if (addr != vma->vm_start) {
+ /*
+ * It is a bit too late to pretend changing the virtual
+ * area of the mapping, we just corrupted userspace
+ * in the do_munmap, so FIXME (not in 2.4 to avoid breaking
+ * the driver API).
+ */
+ struct vm_area_struct * stale_vma;
+ /* Since addr changed, we rely on the mmap op to prevent
+ * collisions with existing vmas and just use find_vma_prepare
+ * to update the tree pointers.
+ */
+ addr = vma->vm_start;
+ stale_vma = find_vma_prepare(mm, addr, &prev,
+ &rb_link, &rb_parent);
+ /*
+ * Make sure the lowlevel driver did its job right.
+ */
+ if (unlikely(stale_vma && stale_vma->vm_start < vma->vm_end)) {
+ printk(KERN_ERR "buggy mmap operation: [<%p>]\n",
+ file ? file->f_op->mmap : NULL);
+ BUG();
+ }
+ }
+
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ if (correct_wcount)
+ atomic_inc(&file->f_dentry->d_inode->i_writecount);
+
+out:
+ mm->total_vm += len >> PAGE_SHIFT;
+ if (vm_flags & VM_LOCKED) {
+ mm->locked_vm += len >> PAGE_SHIFT;
+ make_pages_present(addr, addr + len);
+ }
+ return addr;
+
+unmap_and_free_vma:
+ if (correct_wcount)
+ atomic_inc(&file->f_dentry->d_inode->i_writecount);
+ vma->vm_file = NULL;
+ fput(file);
+
+ /* Undo any partial mapping done by a device driver. */
+ zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
+free_vma:
+ kmem_cache_free(vm_area_cachep, vma);
+ return error;
+}
+
+/* Get an address range which is currently unmapped.
+ * For shmat() with addr=0.
+ *
+ * Ugly calling convention alert:
+ * Return value with the low bits set means error value,
+ * ie
+ * if (ret & ~PAGE_MASK)
+ * error = ret;
+ *
+ * This function "knows" that -ENOMEM has the bits set.
+ */
+#ifndef HAVE_ARCH_UNMAPPED_AREA
+static inline unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct vm_area_struct *vma;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+ addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
+
+ for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+ if (!vma || addr + len <= vma->vm_start)
+ return addr;
+ addr = vma->vm_end;
+ }
+}
+#else
+extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+#endif
+
+unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ if (flags & MAP_FIXED) {
+ if (addr > TASK_SIZE - len)
+ return -ENOMEM;
+ if (addr & ~PAGE_MASK)
+ return -EINVAL;
+ return addr;
+ }
+
+ if (file && file->f_op && file->f_op->get_unmapped_area)
+ return file->f_op->get_unmapped_area(file, addr, len, pgoff, flags);
+
+ return arch_get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
+{
+ struct vm_area_struct *vma = NULL;
+
+ if (mm) {
+ /* Check the cache first. */
+ /* (Cache hit rate is typically around 35%.) */
+ vma = mm->mmap_cache;
+ if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+ rb_node_t * rb_node;
+
+ rb_node = mm->mm_rb.rb_node;
+ vma = NULL;
+
+ while (rb_node) {
+ struct vm_area_struct * vma_tmp;
+
+ vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+
+ if (vma_tmp->vm_end > addr) {
+ vma = vma_tmp;
+ if (vma_tmp->vm_start <= addr)
+ break;
+ rb_node = rb_node->rb_left;
+ } else
+ rb_node = rb_node->rb_right;
+ }
+ if (vma)
+ mm->mmap_cache = vma;
+ }
+ }
+ return vma;
+}
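Note on usage: find_vma() returns the first VMA whose vm_end lies above addr, which is not necessarily a VMA that contains addr. Callers needing containment re-check vm_start themselves, roughly as in this hedged sketch (variable names are illustrative):

    struct vm_area_struct *vma = find_vma(mm, addr);

    if (vma && vma->vm_start <= addr) {
            /* addr falls inside an existing mapping */
    } else {
            /* addr sits in a hole, or above every mapping */
    }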
+
+/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
+struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+ struct vm_area_struct **pprev)
+{
+ if (mm) {
+ /* Go through the RB tree quickly. */
+ struct vm_area_struct * vma;
+ rb_node_t * rb_node, * rb_last_right, * rb_prev;
+
+ rb_node = mm->mm_rb.rb_node;
+ rb_last_right = rb_prev = NULL;
+ vma = NULL;
+
+ while (rb_node) {
+ struct vm_area_struct * vma_tmp;
+
+ vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+
+ if (vma_tmp->vm_end > addr) {
+ vma = vma_tmp;
+ rb_prev = rb_last_right;
+ if (vma_tmp->vm_start <= addr)
+ break;
+ rb_node = rb_node->rb_left;
+ } else {
+ rb_last_right = rb_node;
+ rb_node = rb_node->rb_right;
+ }
+ }
+ if (vma) {
+ if (vma->vm_rb.rb_left) {
+ rb_prev = vma->vm_rb.rb_left;
+ while (rb_prev->rb_right)
+ rb_prev = rb_prev->rb_right;
+ }
+ *pprev = NULL;
+ if (rb_prev)
+ *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
+ if ((rb_prev ? (*pprev)->vm_next : mm->mmap) != vma)
+ BUG();
+ return vma;
+ }
+ }
+ *pprev = NULL;
+ return NULL;
+}
+
+struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr)
+{
+ struct vm_area_struct * vma;
+ unsigned long start;
+
+ addr &= PAGE_MASK;
+ vma = find_vma(mm,addr);
+ if (!vma)
+ return NULL;
+ if (vma->vm_start <= addr)
+ return vma;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ return NULL;
+ start = vma->vm_start;
+ if (expand_stack(vma, addr))
+ return NULL;
+ if (vma->vm_flags & VM_LOCKED) {
+ make_pages_present(addr, start);
+ }
+ return vma;
+}
+
+/* Normal function to fix up a mapping
+ * This function is the default for when an area has no specific
+ * function. This may be used as part of a more specific routine.
+ * This function works out what part of an area is affected and
+ * adjusts the mapping information. Since the actual page
+ * manipulation is done in do_mmap(), none need be done here,
+ * though it would probably be more appropriate.
+ *
+ * By the time this function is called, the area struct has been
+ * removed from the process mapping list, so it needs to be
+ * reinserted if necessary.
+ *
+ * The 4 main cases are:
+ * Unmapping the whole area
+ * Unmapping from the start of the segment to a point in it
+ * Unmapping from an intermediate point to the end
+ * Unmapping between two intermediate points, making a hole.
+ *
+ * Case 4 involves the creation of 2 new areas, for each side of
+ * the hole. If possible, we reuse the existing area rather than
+ * allocate a new one, and the return indicates whether the old
+ * area was reused.
+ */
+static struct vm_area_struct * unmap_fixup(struct mm_struct *mm,
+ struct vm_area_struct *area, unsigned long addr, size_t len,
+ struct vm_area_struct *extra)
+{
+ struct vm_area_struct *mpnt;
+ unsigned long end = addr + len;
+
+ area->vm_mm->total_vm -= len >> PAGE_SHIFT;
+ if (area->vm_flags & VM_LOCKED)
+ area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
+
+ /* Unmapping the whole area. */
+ if (addr == area->vm_start && end == area->vm_end) {
+ if (area->vm_ops && area->vm_ops->close)
+ area->vm_ops->close(area);
+ if (area->vm_file)
+ fput(area->vm_file);
+ kmem_cache_free(vm_area_cachep, area);
+ return extra;
+ }
+
+ /* Work out to one of the ends. */
+ if (end == area->vm_end) {
+ /*
+ * here area isn't visible to the semaphore-less readers
+ * so we don't need to update it under the spinlock.
+ */
+ area->vm_end = addr;
+ lock_vma_mappings(area);
+ spin_lock(&mm->page_table_lock);
+ } else if (addr == area->vm_start) {
+ area->vm_pgoff += (end - area->vm_start) >> PAGE_SHIFT;
+ /* same locking considerations of the above case */
+ area->vm_start = end;
+ lock_vma_mappings(area);
+ spin_lock(&mm->page_table_lock);
+ } else {
+ /* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
+ /* Add end mapping -- leave beginning for below */
+ mpnt = extra;
+ extra = NULL;
+
+ mpnt->vm_mm = area->vm_mm;
+ mpnt->vm_start = end;
+ mpnt->vm_end = area->vm_end;
+ mpnt->vm_page_prot = area->vm_page_prot;
+ mpnt->vm_flags = area->vm_flags;
+ mpnt->vm_raend = 0;
+ mpnt->vm_ops = area->vm_ops;
+ mpnt->vm_pgoff = area->vm_pgoff + ((end - area->vm_start) >> PAGE_SHIFT);
+ mpnt->vm_file = area->vm_file;
+ mpnt->vm_private_data = area->vm_private_data;
+ if (mpnt->vm_file)
+ get_file(mpnt->vm_file);
+ if (mpnt->vm_ops && mpnt->vm_ops->open)
+ mpnt->vm_ops->open(mpnt);
+ area->vm_end = addr; /* Truncate area */
+
+ /* Because mpnt->vm_file == area->vm_file this locks
+ * things correctly.
+ */
+ lock_vma_mappings(area);
+ spin_lock(&mm->page_table_lock);
+ __insert_vm_struct(mm, mpnt);
+ }
+
+ __insert_vm_struct(mm, area);
+ spin_unlock(&mm->page_table_lock);
+ unlock_vma_mappings(area);
+ return extra;
+}
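The four cases enumerated in the comment above unmap_fixup() boil down to comparing the unmap interval against the VMA's endpoints. A rough sketch, not part of the kernel source (the helper name and return codes are invented, and it assumes the caller has already clipped addr/len to lie inside the VMA, as do_munmap() below arranges):

    static int unmap_case(struct vm_area_struct *area,
                          unsigned long addr, size_t len)
    {
            unsigned long end = addr + len;

            if (addr == area->vm_start && end == area->vm_end)
                    return 1;        /* whole area disappears */
            if (end == area->vm_end)
                    return 2;        /* trim the tail         */
            if (addr == area->vm_start)
                    return 3;        /* trim the head         */
            return 4;                /* punch a hole: the extra VMA is
                                      * consumed for the upper piece  */
    }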
+
+/*
+ * Try to free as many page directory entries as we can,
+ * without having to work very hard at actually scanning
+ * the page tables themselves.
+ *
+ * Right now we try to free page tables if we have a nice
+ * PGDIR-aligned area that got free'd up. We could be more
+ * granular if we want to, but this is fast and simple,
+ * and covers the bad cases.
+ *
+ * "prev", if it exists, points to a vma before the one
+ * we just free'd - but there's no telling how much before.
+ */
+static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
+ unsigned long start, unsigned long end)
+{
+ unsigned long first = start & PGDIR_MASK;
+ unsigned long last = end + PGDIR_SIZE - 1;
+ unsigned long start_index, end_index;
+
+ if (!prev) {
+ prev = mm->mmap;
+ if (!prev)
+ goto no_mmaps;
+ if (prev->vm_end > start) {
+ if (last > prev->vm_start)
+ last = prev->vm_start;
+ goto no_mmaps;
+ }
+ }
+ for (;;) {
+ struct vm_area_struct *next = prev->vm_next;
+
+ if (next) {
+ if (next->vm_start < start) {
+ prev = next;
+ continue;
+ }
+ if (last > next->vm_start)
+ last = next->vm_start;
+ }
+ if (prev->vm_end > first)
+ first = prev->vm_end + PGDIR_SIZE - 1;
+ break;
+ }
+no_mmaps:
+ /*
+ * If the PGD bits are not consecutive in the virtual address, the
+ * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
+ */
+ start_index = pgd_index(first);
+ end_index = pgd_index(last);
+ if (end_index > start_index) {
+ clear_page_tables(mm, start_index, end_index - start_index);
+ flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
+ }
+}
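A short worked example of the rounding above, with assumed constants (PGDIR_SHIFT == 22, so PGDIR_SIZE == 4 MB and PGDIR_MASK == 0xffc00000; purely illustrative):

    unsigned long start = 0x00801000, end = 0x00c03000;
    unsigned long first = start & PGDIR_MASK;    /* 0x00800000: rounded down */
    unsigned long last  = end + PGDIR_SIZE - 1;  /* 0x01002fff: rounded up   */
    /* pgd_index(first) == 2 and pgd_index(last) == 4, so clear_page_tables()
     * would be asked to drop the page tables behind directory slots 2 and 3,
     * provided no neighbouring VMA narrowed the range first. */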
+
+/* Munmap is split into 2 main parts -- this part which finds
+ * what needs doing, and the areas themselves, which do the
+ * work. This now handles partial unmappings.
+ * Jeremy Fitzhardine <jeremy@sw.oz.au>
+ */
+int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
+{
+ struct vm_area_struct *mpnt, *prev, **npp, *free, *extra;
+
+ if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
+ return -EINVAL;
+
+ if ((len = PAGE_ALIGN(len)) == 0)
+ return -EINVAL;
+
+ /* Check if this memory area is ok - put it on the temporary
+ * list if so.. The checks here are pretty simple --
+ * every area affected in some way (by any overlap) is put
+ * on the list. If nothing is put on, nothing is affected.
+ */
+ mpnt = find_vma_prev(mm, addr, &prev);
+ if (!mpnt)
+ return 0;
+ /* we have addr < mpnt->vm_end */
+
+ if (mpnt->vm_start >= addr+len)
+ return 0;
+
+	/* If we'll make a "hole", check the vm area count limit */
+ if ((mpnt->vm_start < addr && mpnt->vm_end > addr+len)
+ && mm->map_count >= max_map_count)
+ return -ENOMEM;
+
+ /*
+ * We may need one additional vma to fix up the mappings ...
+ * and this is the last chance for an easy error exit.
+ */
+ extra = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!extra)
+ return -ENOMEM;
+
+ npp = (prev ? &prev->vm_next : &mm->mmap);
+ free = NULL;
+ spin_lock(&mm->page_table_lock);
+ for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
+ *npp = mpnt->vm_next;
+ mpnt->vm_next = free;
+ free = mpnt;
+ rb_erase(&mpnt->vm_rb, &mm->mm_rb);
+ }
+ mm->mmap_cache = NULL; /* Kill the cache. */
+ spin_unlock(&mm->page_table_lock);
+
+ /* Ok - we have the memory areas we should free on the 'free' list,
+ * so release them, and unmap the page range..
+	 * If one of the segments is only being partially unmapped,
+ * it will put new vm_area_struct(s) into the address space.
+ * In that case we have to be careful with VM_DENYWRITE.
+ */
+ while ((mpnt = free) != NULL) {
+ unsigned long st, end, size;
+ struct file *file = NULL;
+
+ free = free->vm_next;
+
+ st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
+ end = addr+len;
+ end = end > mpnt->vm_end ? mpnt->vm_end : end;
+ size = end - st;
+
+ if (mpnt->vm_flags & VM_DENYWRITE &&
+ (st != mpnt->vm_start || end != mpnt->vm_end) &&
+ (file = mpnt->vm_file) != NULL) {
+ atomic_dec(&file->f_dentry->d_inode->i_writecount);
+ }
+ remove_shared_vm_struct(mpnt);
+ mm->map_count--;
+
+ zap_page_range(mm, st, size);
+
+ /*
+ * Fix the mapping, and free the old area if it wasn't reused.
+ */
+ extra = unmap_fixup(mm, mpnt, st, size, extra);
+ if (file)
+ atomic_inc(&file->f_dentry->d_inode->i_writecount);
+ }
+ validate_mm(mm);
+
+ /* Release the extra vma struct if it wasn't used */
+ if (extra)
+ kmem_cache_free(vm_area_cachep, extra);
+
+ free_pgtables(mm, prev, addr, addr+len);
+
+ return 0;
+}
+
+asmlinkage long sys_munmap(unsigned long addr, size_t len)
+{
+ int ret;
+ struct mm_struct *mm = current->mm;
+
+ down_write(&mm->mmap_sem);
+ ret = do_munmap(mm, addr, len);
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+
+/*
+ * this is really a simplified "do_mmap". it only handles
+ * anonymous maps. eventually we may be able to do some
+ * brk-specific accounting here.
+ */
+unsigned long do_brk(unsigned long addr, unsigned long len)
+{
+ struct mm_struct * mm = current->mm;
+ struct vm_area_struct * vma, * prev;
+ unsigned long flags;
+ rb_node_t ** rb_link, * rb_parent;
+
+ len = PAGE_ALIGN(len);
+ if (!len)
+ return addr;
+
+ /*
+ * mlock MCL_FUTURE?
+ */
+ if (mm->def_flags & VM_LOCKED) {
+ unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+ locked += len;
+ if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ return -EAGAIN;
+ }
+
+ /*
+ * Clear old maps. this also does some error checking for us
+ */
+ munmap_back:
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+ if (vma && vma->vm_start < addr + len) {
+ if (do_munmap(mm, addr, len))
+ return -ENOMEM;
+ goto munmap_back;
+ }
+
+ /* Check against address space limits *after* clearing old maps... */
+ if ((mm->total_vm << PAGE_SHIFT) + len
+ > current->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+
+ if (mm->map_count > max_map_count)
+ return -ENOMEM;
+
+ if (!vm_enough_memory(len >> PAGE_SHIFT))
+ return -ENOMEM;
+
+ flags = VM_DATA_DEFAULT_FLAGS | mm->def_flags;
+
+ /* Can we just expand an old anonymous mapping? */
+ if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags))
+ goto out;
+
+ /*
+ * create a vma struct for an anonymous mapping
+ */
+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!vma)
+ return -ENOMEM;
+
+ vma->vm_mm = mm;
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+ vma->vm_flags = flags;
+ vma->vm_page_prot = protection_map[flags & 0x0f];
+ vma->vm_ops = NULL;
+ vma->vm_pgoff = 0;
+ vma->vm_file = NULL;
+ vma->vm_private_data = NULL;
+
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+
+out:
+ mm->total_vm += len >> PAGE_SHIFT;
+ if (flags & VM_LOCKED) {
+ mm->locked_vm += len >> PAGE_SHIFT;
+ make_pages_present(addr, addr + len);
+ }
+ return addr;
+}
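Like do_mmap_pgoff(), do_brk() assumes the caller already holds mm->mmap_sem for writing. A minimal caller sketch (oldbrk and newbrk are assumed page-aligned values computed by the caller; the error label is hypothetical):

    struct mm_struct *mm = current->mm;
    unsigned long ret;

    down_write(&mm->mmap_sem);
    ret = do_brk(oldbrk, newbrk - oldbrk);
    up_write(&mm->mmap_sem);
    if (ret != oldbrk)
            goto brk_failed;         /* do_brk() returned an error code */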
+
+/* Build the RB tree corresponding to the VMA list. */
+void build_mmap_rb(struct mm_struct * mm)
+{
+ struct vm_area_struct * vma;
+ rb_node_t ** rb_link, * rb_parent;
+
+ mm->mm_rb = RB_ROOT;
+ rb_link = &mm->mm_rb.rb_node;
+ rb_parent = NULL;
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ __vma_link_rb(mm, vma, rb_link, rb_parent);
+ rb_parent = &vma->vm_rb;
+ rb_link = &rb_parent->rb_right;
+ }
+}
+
+/* Release all mmaps. */
+void exit_mmap(struct mm_struct * mm)
+{
+ struct vm_area_struct * mpnt;
+
+ release_segments(mm);
+ spin_lock(&mm->page_table_lock);
+ mpnt = mm->mmap;
+ mm->mmap = mm->mmap_cache = NULL;
+ mm->mm_rb = RB_ROOT;
+ mm->rss = 0;
+ spin_unlock(&mm->page_table_lock);
+ mm->total_vm = 0;
+ mm->locked_vm = 0;
+
+ flush_cache_mm(mm);
+ while (mpnt) {
+ struct vm_area_struct * next = mpnt->vm_next;
+ unsigned long start = mpnt->vm_start;
+ unsigned long end = mpnt->vm_end;
+ unsigned long size = end - start;
+
+ if (mpnt->vm_ops) {
+ if (mpnt->vm_ops->close)
+ mpnt->vm_ops->close(mpnt);
+ }
+ mm->map_count--;
+ remove_shared_vm_struct(mpnt);
+ zap_page_range(mm, start, size);
+ if (mpnt->vm_file)
+ fput(mpnt->vm_file);
+ kmem_cache_free(vm_area_cachep, mpnt);
+ mpnt = next;
+ }
+
+ /* This is just debugging */
+ if (mm->map_count)
+ BUG();
+
+ clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
+
+ flush_tlb_mm(mm);
+}
+
+/* Insert vm structure into process list sorted by address
+ * and into the inode's i_mmap ring. If vm_file is non-NULL
+ * then the i_shared_lock must be held here.
+ */
+void __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+{
+ struct vm_area_struct * __vma, * prev;
+ rb_node_t ** rb_link, * rb_parent;
+
+ __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
+ if (__vma && __vma->vm_start < vma->vm_end)
+ BUG();
+ __vma_link(mm, vma, prev, rb_link, rb_parent);
+ mm->map_count++;
+ validate_mm(mm);
+}
+
+void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+{
+ struct vm_area_struct * __vma, * prev;
+ rb_node_t ** rb_link, * rb_parent;
+
+ __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
+ if (__vma && __vma->vm_start < vma->vm_end)
+ BUG();
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ validate_mm(mm);
+}
+
+#else /* NO_MM */
+
+unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ if (flags & MAP_FIXED) {
+ if (addr > TASK_SIZE - len)
+ return -EINVAL;
+ if (addr & ~PAGE_MASK)
+ return -EINVAL;
+ return addr;
+ }
+
+ if (file && file->f_op && file->f_op->get_unmapped_area)
+ return file->f_op->get_unmapped_area(file, addr, len, pgoff, flags);
+
+ return 0;
+}
+
+#ifdef DEBUG
+static void show_process_blocks(void)
+{
+ struct mm_tblock_struct * tblock, *tmp;
+
+ printk("Process blocks %d:", current->pid);
+
+ tmp = &current->mm->tblock;
+ while (tmp) {
+ printk(" %p: %p", tmp, tmp->rblock);
+ if (tmp->rblock)
+ printk(" (%d @%p #%d)", ksize(tmp->rblock->kblock), tmp->rblock->kblock, tmp->rblock->refcount);
+ if (tmp->next)
+ printk(" ->");
+ else
+ printk(".");
+ tmp = tmp->next;
+ }
+ printk("\n");
+}
+#endif /* DEBUG */
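This NO_MM implementation tracks every mapping with a per-task block (tblock) that points at a reference-counted region block (rblock) wrapping the underlying kmalloc() region. The real definitions live elsewhere in the uClinux headers; inferred purely from the fields used in this file, they look roughly like this (an approximation, not the authoritative layout):

    struct mm_rblock_struct {                /* one per kmalloc()'d region     */
            int             refcount;        /* tblocks still referencing it   */
            void            *kblock;         /* the backing kmalloc() memory   */
            unsigned long   size;            /* length originally requested    */
    };

    struct mm_tblock_struct {                /* one per mapping in a task's mm */
            struct mm_rblock_struct *rblock;
            struct mm_tblock_struct *next;   /* singly linked off mm->tblock   */
    };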
+
+extern unsigned long askedalloc, realalloc;
+
+unsigned long do_mmap_pgoff(
+ struct file * file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long prot,
+ unsigned long flags,
+ unsigned long pgoff)
+{
+ void * result;
+#if 0
+ struct mm_struct * mm = current->mm;
+#endif
+ struct mm_tblock_struct * tblock;
+ unsigned int vm_flags;
+
+ /*
+ * Get the NO_MM specific checks done first
+ */
+ if ((flags & MAP_SHARED) && (prot & PROT_WRITE) && (file)) {
+ printk("MAP_SHARED not supported (cannot write mappings to disk)\n");
+ return -EINVAL;
+ }
+
+ if ((prot & PROT_WRITE) && (flags & MAP_PRIVATE)) {
+ printk("Private writable mappings not supported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * now all the standard checks
+ */
+ if (file && (!file->f_op || !file->f_op->mmap))
+ return -ENODEV;
+
+ if (PAGE_ALIGN(len) == 0)
+ return addr;
+
+ if (len > TASK_SIZE)
+ return -EINVAL;
+
+ /* offset overflow? */
+ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
+ return -EINVAL;
+
+#if 0
+ /* Too many mappings? */
+ if (mm->map_count > MAX_MAP_COUNT)
+ return -ENOMEM;
+#endif
+
+ /* Obtain the address to map to. we verify (or select) it and ensure
+ * that it represents a valid section of the address space.
+ */
+ addr = get_unmapped_area(file, addr, len, pgoff, flags);
+ if (addr & ~PAGE_MASK)
+ return addr;
+
+ /* Do simple checking here so the lower-level routines won't have
+ * to. we assume access permissions have been handled by the open
+ * of the memory object, so we don't do any here.
+ */
+ vm_flags = calc_vm_flags(prot,flags) /* | mm->def_flags */ | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+
+#if 0
+ /* mlock MCL_FUTURE? */
+ if (vm_flags & VM_LOCKED) {
+ unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+ locked += len;
+ if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ return -EAGAIN;
+ }
+#endif
+
+ /*
+ * determine the object being mapped and call the appropriate
+ * specific mapper.
+ */
+
+ if (file) {
+ struct vm_area_struct vma;
+ int error;
+
+
+ if (!file->f_op)
+ return -ENODEV;
+
+ vma.vm_start = addr;
+ vma.vm_end = addr + len;
+ vma.vm_flags = vm_flags;
+ vma.vm_offset = pgoff << PAGE_SHIFT;
+
+#ifdef MAGIC_ROM_PTR
+ /* First, try simpler routine designed to give us a ROM pointer. */
+
+ if (file->f_op->romptr && !(prot & PROT_WRITE)) {
+ error = file->f_op->romptr(file, &vma);
+#ifdef DEBUG
+ printk("romptr mmap returned %d, start 0x%.8x\n", error,
+ vma.vm_start);
+#endif
+ if (!error)
+ return vma.vm_start;
+ else if (error != -ENOSYS)
+ return error;
+ } else
+#endif /* MAGIC_ROM_PTR */
+ /* Then try full mmap routine, which might return a RAM pointer,
+ or do something truly complicated. */
+
+ if (file->f_op->mmap) {
+ error = file->f_op->mmap(file, &vma);
+
+#ifdef DEBUG
+ printk("mmap mmap returned %d /%x\n", error, vma.vm_start);
+#endif
+ if (!error)
+ return vma.vm_start;
+ else if (error != -ENOSYS)
+ return error;
+ } else
+ return -ENODEV; /* No mapping operations defined */
+
+ /* An ENOSYS error indicates that mmap isn't possible (as opposed to
+ tried but failed) so we'll fall through to the copy. */
+ }
+
+ tblock = (struct mm_tblock_struct *)
+ kmalloc(sizeof(struct mm_tblock_struct), GFP_KERNEL);
+ if (!tblock) {
+ printk("Allocation of tblock for %lu byte allocation from process %d failed\n", len, current->pid);
+ show_buffers();
+ show_free_areas();
+ return -ENOMEM;
+ }
+
+ tblock->rblock = (struct mm_rblock_struct *)
+ kmalloc(sizeof(struct mm_rblock_struct), GFP_KERNEL);
+
+ if (!tblock->rblock) {
+ printk("Allocation of rblock for %lu byte allocation from process %d failed\n", len, current->pid);
+ show_buffers();
+ show_free_areas();
+ kfree(tblock);
+ return -ENOMEM;
+ }
+
+
+ result = kmalloc(len, GFP_KERNEL);
+ if (!result) {
+ printk("Allocation of length %lu from process %d failed\n", len,
+ current->pid);
+ show_buffers();
+ show_free_areas();
+ kfree(tblock->rblock);
+ kfree(tblock);
+ return -ENOMEM;
+ }
+
+ tblock->rblock->refcount = 1;
+ tblock->rblock->kblock = result;
+ tblock->rblock->size = len;
+
+ realalloc += ksize(result);
+ askedalloc += len;
+
+#ifdef WARN_ON_SLACK
+ if ((len+WARN_ON_SLACK) <= ksize(result))
+ printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n", len, current->pid, ksize(result)-len);
+#endif
+
+ if (file) {
+ int error;
+ mm_segment_t old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ error = file->f_op->read(file, (char *) result, len, &file->f_pos);
+ set_fs(old_fs);
+ if (error < 0) {
+ kfree(result);
+ kfree(tblock->rblock);
+ kfree(tblock);
+ return error;
+ }
+ if (error<len)
+ memset(result+error, '\0', len-error);
+ } else {
+ memset(result, '\0', len);
+ }
+
+
+ realalloc += ksize(tblock);
+ askedalloc += sizeof(struct mm_tblock_struct);
+
+ realalloc += ksize(tblock->rblock);
+ askedalloc += sizeof(struct mm_rblock_struct);
+
+ tblock->next = current->mm->tblock.next;
+ current->mm->tblock.next = tblock;
+
+#ifdef DEBUG
+ printk("do_mmap:\n");
+ show_process_blocks();
+#endif
+
+ return (unsigned long)result;
+}
+
+int do_munmap(struct mm_struct * mm, unsigned long addr, size_t len)
+{
+ struct mm_tblock_struct * tblock, *tmp;
+
+#ifdef MAGIC_ROM_PTR
+ /* For efficiency's sake, if the pointer is obviously in ROM,
+ don't bother walking the lists to free it */
+ if (is_in_rom(addr))
+ return 0;
+#endif
+
+#ifdef DEBUG
+ printk("do_munmap:\n");
+#endif
+
+ tmp = &mm->tblock; /* dummy head */
+ while ((tblock=tmp->next) && tblock->rblock &&
+ tblock->rblock->kblock != (void*)addr)
+ tmp = tblock;
+
+ if (!tblock) {
+ printk("munmap of non-mmaped memory by process %d (%s): %p\n",
+ current->pid, current->comm, (void*)addr);
+ return -EINVAL;
+ }
+ if (tblock->rblock) {
+ if (!--tblock->rblock->refcount) {
+ if (tblock->rblock->kblock) {
+ realalloc -= ksize(tblock->rblock->kblock);
+ askedalloc -= tblock->rblock->size;
+ kfree(tblock->rblock->kblock);
+ }
+
+ realalloc -= ksize(tblock->rblock);
+ askedalloc -= sizeof(struct mm_rblock_struct);
+ kfree(tblock->rblock);
+ }
+ }
+ tmp->next = tblock->next;
+ realalloc -= ksize(tblock);
+ askedalloc -= sizeof(struct mm_tblock_struct);
+ kfree(tblock);
+
+#ifdef DEBUG
+ show_process_blocks();
+#endif
+
+ return 0;
+}
+
+/* Release all mmaps. */
+void exit_mmap(struct mm_struct * mm)
+{
+ struct mm_tblock_struct *tmp;
+
+ if (!mm)
+ return;
+
+#ifdef DEBUG
+ printk("Exit_mmap:\n");
+#endif
+
+ while((tmp = mm->tblock.next)) {
+ if (tmp->rblock) {
+ if (!--tmp->rblock->refcount) {
+ if (tmp->rblock->kblock) {
+ realalloc -= ksize(tmp->rblock->kblock);
+ askedalloc -= tmp->rblock->size;
+ kfree(tmp->rblock->kblock);
+ }
+ realalloc -= ksize(tmp->rblock);
+ askedalloc -= sizeof(struct mm_rblock_struct);
+ kfree(tmp->rblock);
+ }
+ tmp->rblock = 0;
+ }
+ mm->tblock.next = tmp->next;
+ realalloc -= ksize(tmp);
+ askedalloc -= sizeof(struct mm_tblock_struct);
+ kfree(tmp);
+ }
+
+#ifdef DEBUG
+ show_process_blocks();
+#endif
+}
+
+asmlinkage long sys_munmap(unsigned long addr, size_t len)
+{
+ int ret;
+ struct mm_struct *mm = current->mm;
+
+ down_write(&mm->mmap_sem);
+ ret = do_munmap(mm, addr, len);
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+
+#endif /* NO_MM */
diff --git a/uClinux-2.4.20-uc1/mmnommu/mprotect.c b/uClinux-2.4.20-uc1/mmnommu/mprotect.c
new file mode 100644
index 0000000..c83cba6
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/mprotect.c
@@ -0,0 +1,18 @@
+/*
+ * linux/mm/mprotect.c
+ *
+ * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> ref uClinux 2.0
+ * (C) Copyright 1994 Linus Torvalds
+ */
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/shm.h>
+#include <linux/mman.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
+{
+ return -ENOSYS;
+}
diff --git a/uClinux-2.4.20-uc1/mmnommu/mremap.c b/uClinux-2.4.20-uc1/mmnommu/mremap.c
new file mode 100644
index 0000000..590703d
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/mremap.c
@@ -0,0 +1,26 @@
+/*
+ * linux/mm/mremap.c
+ *
+ * Copyright (c) 2001 Lineo, Inc. David McCullough <davidm@lineo.com>
+ * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> ref uClinux 2.0
+ * (C) Copyright 1996 Linus Torvalds
+ */
+
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/shm.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+/*
+ * FIXME: Could do a traditional realloc() in some cases.
+ */
+asmlinkage unsigned long sys_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, unsigned long new_addr)
+{
+ return -ENOSYS;
+}
diff --git a/uClinux-2.4.20-uc1/mmnommu/numa.c b/uClinux-2.4.20-uc1/mmnommu/numa.c
new file mode 100644
index 0000000..0b602ef
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/numa.c
@@ -0,0 +1,130 @@
+/*
+ * Written by Kanoj Sarcar, SGI, Aug 1999
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mmzone.h>
+#include <linux/spinlock.h>
+
+int numnodes = 1; /* Initialized for UMA platforms */
+
+static bootmem_data_t contig_bootmem_data;
+pg_data_t contig_page_data = { bdata: &contig_bootmem_data };
+
+#ifndef CONFIG_DISCONTIGMEM
+
+/*
+ * This is meant to be invoked by platforms whose physical memory starts
+ * at a considerably higher value than 0. Examples are Super-H, ARM, m68k.
+ * Should be invoked with parameters (0, 0, unsigned long *[], start_paddr).
+ */
+void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
+ unsigned long *zones_size, unsigned long zone_start_paddr,
+ unsigned long *zholes_size)
+{
+ free_area_init_core(0, &contig_page_data, &mem_map, zones_size,
+ zone_start_paddr, zholes_size, pmap);
+}
+
+#endif /* !CONFIG_DISCONTIGMEM */
+
+struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order)
+{
+#ifdef CONFIG_NUMA
+ return __alloc_pages(gfp_mask, order, NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
+#else
+ return alloc_pages(gfp_mask, order);
+#endif
+}
+
+#ifdef CONFIG_DISCONTIGMEM
+
+#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
+
+static spinlock_t node_lock = SPIN_LOCK_UNLOCKED;
+
+void show_free_areas_node(pg_data_t *pgdat)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&node_lock, flags);
+ show_free_areas_core(pgdat);
+ spin_unlock_irqrestore(&node_lock, flags);
+}
+
+/*
+ * Nodes can be initialized in parallel, in no particular order.
+ */
+void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
+ unsigned long *zones_size, unsigned long zone_start_paddr,
+ unsigned long *zholes_size)
+{
+ int i, size = 0;
+ struct page *discard;
+
+ if (mem_map == (mem_map_t *)NULL)
+ mem_map = (mem_map_t *)PAGE_OFFSET;
+
+ free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_paddr,
+ zholes_size, pmap);
+ pgdat->node_id = nid;
+
+ /*
+ * Get space for the valid bitmap.
+ */
+ for (i = 0; i < MAX_NR_ZONES; i++)
+ size += zones_size[i];
+ size = LONG_ALIGN((size + 7) >> 3);
+ pgdat->valid_addr_bitmap = (unsigned long *)alloc_bootmem_node(pgdat, size);
+ memset(pgdat->valid_addr_bitmap, 0, size);
+}
+
+static struct page * alloc_pages_pgdat(pg_data_t *pgdat, unsigned int gfp_mask,
+ unsigned int order)
+{
+ return __alloc_pages(gfp_mask, order, pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK));
+}
+
+/*
+ * This can be refined. Currently it tries to do round robin; instead it
+ * should do a concentric circle search, starting from the current node.
+ */
+struct page * _alloc_pages(unsigned int gfp_mask, unsigned int order)
+{
+ struct page *ret = 0;
+ pg_data_t *start, *temp;
+#ifndef CONFIG_NUMA
+ unsigned long flags;
+ static pg_data_t *next = 0;
+#endif
+
+ if (order >= MAX_ORDER)
+ return NULL;
+#ifdef CONFIG_NUMA
+ temp = NODE_DATA(numa_node_id());
+#else
+ spin_lock_irqsave(&node_lock, flags);
+ if (!next) next = pgdat_list;
+ temp = next;
+ next = next->node_next;
+ spin_unlock_irqrestore(&node_lock, flags);
+#endif
+ start = temp;
+ while (temp) {
+ if ((ret = alloc_pages_pgdat(temp, gfp_mask, order)))
+ return(ret);
+ temp = temp->node_next;
+ }
+ temp = pgdat_list;
+ while (temp != start) {
+ if ((ret = alloc_pages_pgdat(temp, gfp_mask, order)))
+ return(ret);
+ temp = temp->node_next;
+ }
+ return(0);
+}
+
+#endif /* CONFIG_DISCONTIGMEM */
diff --git a/uClinux-2.4.20-uc1/mmnommu/oom_kill.c b/uClinux-2.4.20-uc1/mmnommu/oom_kill.c
new file mode 100644
index 0000000..07d0c97
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/oom_kill.c
@@ -0,0 +1,254 @@
+/*
+ * linux/mm/oom_kill.c
+ *
+ * Copyright (C) 1998,2000 Rik van Riel
+ * Thanks go out to Claus Fischer for some serious inspiration and
+ * for goading me into coding this file...
+ *
+ * The routines in this file are used to kill a process when
+ * we're seriously out of memory. This gets called from kswapd()
+ * in linux/mm/vmscan.c when we really run out of memory.
+ *
+ * Since we won't call these routines often (on a well-configured
+ * machine) this file will double as a 'coding guide' and a signpost
+ * for newbie kernel hackers. It features several pointers to major
+ * kernel subsystems and hints as to where to find out what things do.
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/swap.h>
+#include <linux/swapctl.h>
+#include <linux/timex.h>
+
+/* #define DEBUG */
+
+/**
+ * int_sqrt - oom_kill.c internal function, rough approximation to sqrt
+ * @x: integer of which to calculate the sqrt
+ *
+ * A very rough approximation to the sqrt() function.
+ */
+static unsigned int int_sqrt(unsigned int x)
+{
+ unsigned int out = x;
+ while (x & ~(unsigned int)1) x >>=2, out >>=1;
+ if (x) out -= out >> 2;
+ return (out ? out : 1);
+}
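To see how rough the approximation is, trace int_sqrt(100): the loop drops two bits of x for every one bit of out (x: 100 -> 25 -> 6 -> 1, out: 100 -> 50 -> 25 -> 12), and the final correction subtracts a quarter, giving 9 against a true square root of 10. In plain C:

    unsigned int x = 100, out = x;
    while (x & ~(unsigned int)1) { x >>= 2; out >>= 1; }   /* out == 12, x == 1 */
    if (x) out -= out >> 2;                                /* out == 9          */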
+
+/**
+ * oom_badness - calculate a numeric value for how bad this task has been
+ * @p: task struct of which task we should calculate
+ *
+ * The formula used is relatively simple and documented inline in the
+ * function. The main rationale is that we want to select a good task
+ * to kill when we run out of memory.
+ *
+ * Good in this context means that:
+ * 1) we lose the minimum amount of work done
+ * 2) we recover a large amount of memory
+ * 3) we don't kill anything innocent of eating tons of memory
+ * 4) we want to kill the minimum number of processes (one)
+ * 5) we try to kill the process the user expects us to kill, this
+ *	algorithm has been meticulously tuned to meet the principle
+ * of least surprise ... (be careful when you change it)
+ */
+
+static int badness(struct task_struct *p)
+{
+ int points, cpu_time, run_time;
+
+ if (!p->mm)
+ return 0;
+ /*
+ * The memory size of the process is the basis for the badness.
+ */
+ points = p->mm->total_vm;
+
+ /*
+ * CPU time is in seconds and run time is in minutes. There is no
+ * particular reason for this other than that it turned out to work
+ * very well in practice. This is not safe against jiffie wraps
+ * but we don't care _that_ much...
+ */
+ cpu_time = (p->times.tms_utime + p->times.tms_stime) >> (SHIFT_HZ + 3);
+ run_time = (jiffies - p->start_time) >> (SHIFT_HZ + 10);
+
+ points /= int_sqrt(cpu_time);
+ points /= int_sqrt(int_sqrt(run_time));
+
+ /*
+ * Niced processes are most likely less important, so double
+ * their badness points.
+ */
+ if (p->nice > 0)
+ points *= 2;
+
+ /*
+ * Superuser processes are usually more important, so we make it
+ * less likely that we kill those.
+ */
+ if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_ADMIN) ||
+ p->uid == 0 || p->euid == 0)
+ points /= 4;
+
+ /*
+ * We don't want to kill a process with direct hardware access.
+ * Not only could that mess up the hardware, but usually users
+ * tend to only have this flag set on applications they think
+ * of as important.
+ */
+ if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_RAWIO))
+ points /= 4;
+#ifdef DEBUG
+ printk(KERN_DEBUG "OOMkill: task %d (%s) got %d points\n",
+ p->pid, p->comm, points);
+#endif
+ return points;
+}
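A worked example with invented numbers, just to show the shape of the formula: take a task whose total_vm is 16000 pages and whose shifted cpu_time and run_time both come out as 100.

    int points = 16000;                  /* total_vm in pages                      */
    points /= int_sqrt(100);             /* cpu_time == 100 -> divide by 9 -> 1777 */
    points /= int_sqrt(int_sqrt(100));   /* run_time == 100 -> divide by 2 -> 888  */
    /* a positive nice value would double this to 1776, while running as
     * root (or with CAP_SYS_RAWIO) would quarter it to 222; integer
     * division throughout. */

Large, short-lived, unprivileged memory hogs therefore collect the most points.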
+
+/*
+ * Simple selection loop. We choose the process with the highest
+ * number of 'points'. We expect the caller will lock the tasklist.
+ *
+ * (not docbooked, we don't want this one cluttering up the manual)
+ */
+static struct task_struct * select_bad_process(void)
+{
+ int maxpoints = 0;
+ struct task_struct *p = NULL;
+ struct task_struct *chosen = NULL;
+
+ for_each_task(p) {
+ if (p->pid) {
+ int points = badness(p);
+ if (points > maxpoints) {
+ chosen = p;
+ maxpoints = points;
+ }
+ }
+ }
+ return chosen;
+}
+
+/**
+ * We must be careful though never to send SIGKILL to a process with
+ * CAP_SYS_RAWIO set; send SIGTERM instead (but it's unlikely that
+ * we select a process with CAP_SYS_RAWIO set).
+ */
+void oom_kill_task(struct task_struct *p)
+{
+ printk(KERN_ERR "Out of Memory: Killed process %d (%s).\n", p->pid, p->comm);
+
+ /*
+ * We give our sacrificial lamb high priority and access to
+ * all the memory it needs. That way it should be able to
+ * exit() and clear out its resources quickly...
+ */
+ p->counter = 5 * HZ;
+ p->flags |= PF_MEMALLOC | PF_MEMDIE;
+
+ /* This process has hardware access, be more careful. */
+ if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_RAWIO)) {
+ force_sig(SIGTERM, p);
+ } else {
+ force_sig(SIGKILL, p);
+ }
+}
+
+/**
+ * oom_kill - kill the "best" process when we run out of memory
+ *
+ * If we run out of memory, we have the choice between either
+ * killing a random task (bad), letting the system crash (worse)
+ * OR trying to be smart about which process to kill. Note that we
+ * don't have to be perfect here, we just have to be good.
+ */
+static void oom_kill(void)
+{
+ struct task_struct *p, *q;
+
+ read_lock(&tasklist_lock);
+ p = select_bad_process();
+
+ /* Found nothing?!?! Either we hang forever, or we panic. */
+ if (p == NULL)
+ panic("Out of memory and no killable processes...\n");
+
+ /* kill all processes that share the ->mm (i.e. all threads) */
+ for_each_task(q) {
+ if (q->mm == p->mm)
+ oom_kill_task(q);
+ }
+ read_unlock(&tasklist_lock);
+
+ /*
+ * Make kswapd go out of the way, so "p" has a good chance of
+ * killing itself before someone else gets the chance to ask
+ * for more memory.
+ */
+ yield();
+ return;
+}
+
+/**
+ * out_of_memory - is the system out of memory?
+ */
+void out_of_memory(void)
+{
+ static unsigned long first, last, count, lastkill;
+ unsigned long now, since;
+
+ /*
+ * Enough swap space left? Not OOM.
+ */
+ if (nr_swap_pages > 0)
+ return;
+
+ now = jiffies;
+ since = now - last;
+ last = now;
+
+ /*
+ * If it's been a long time since last failure,
+ * we're not oom.
+ */
+ last = now;
+ if (since > 5*HZ)
+ goto reset;
+
+ /*
+ * If we haven't tried for at least one second,
+ * we're not really oom.
+ */
+ since = now - first;
+ if (since < HZ)
+ return;
+
+ /*
+ * If we have gotten only a few failures,
+ * we're not really oom.
+ */
+ if (++count < 10)
+ return;
+
+ /*
+ * If we just killed a process, wait a while
+ * to give that task a chance to exit. This
+ * avoids killing multiple processes needlessly.
+ */
+ since = now - lastkill;
+ if (since < HZ*5)
+ return;
+
+ /*
+ * Ok, really out of memory. Kill something.
+ */
+ lastkill = now;
+ oom_kill();
+
+reset:
+ first = now;
+ count = 0;
+}
diff --git a/uClinux-2.4.20-uc1/mmnommu/page_alloc.c b/uClinux-2.4.20-uc1/mmnommu/page_alloc.c
new file mode 100644
index 0000000..d3d8dae
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/page_alloc.c
@@ -0,0 +1,892 @@
+/*
+ * linux/mm/page_alloc.c
+ *
+ * Manages the free list, the system allocates free pages here.
+ * Note that kmalloc() lives in slab.c
+ *
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ * Swap reorganised 29.12.95, Stephen Tweedie
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
+ * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
+ * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
+ * Copyright (c) 2001 Lineo Inc., David McCullough <davidm@lineo.com>
+ * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> ref uClinux 2.0
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/swapctl.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+int nr_swap_pages;
+int nr_active_pages;
+int nr_inactive_pages;
+LIST_HEAD(inactive_list);
+LIST_HEAD(active_list);
+pg_data_t *pgdat_list;
+
+/*
+ *
+ * The zone_table array is used to look up the address of the
+ * struct zone corresponding to a given zone number (ZONE_DMA,
+ * ZONE_NORMAL, or ZONE_HIGHMEM).
+ */
+zone_t *zone_table[MAX_NR_ZONES*MAX_NR_NODES];
+EXPORT_SYMBOL(zone_table);
+
+static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
+static int zone_balance_ratio[MAX_NR_ZONES] __initdata = { 128, 128, 128, };
+static int zone_balance_min[MAX_NR_ZONES] __initdata = { 20 , 20, 20, };
+static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
+
+/*
+ * Temporary debugging check.
+ */
+#define BAD_RANGE(zone, page) \
+( \
+ (((page) - mem_map) >= ((zone)->zone_start_mapnr+(zone)->size)) \
+ || (((page) - mem_map) < (zone)->zone_start_mapnr) \
+ || ((zone) != page_zone(page)) \
+)
+
+/*
+ * Freeing function for a buddy system allocator.
+ * Contrary to prior comments, this is *NOT* hairy, and there
+ * is no reason for anyone not to understand it.
+ *
+ * The concept of a buddy system is to maintain direct-mapped tables
+ * (containing bit values) for memory blocks of various "orders".
+ * The bottom level table contains the map for the smallest allocatable
+ * units of memory (here, pages), and each level above it describes
+ * pairs of units from the levels below, hence, "buddies".
+ * At a high level, all that happens here is marking the table entry
+ * at the bottom level available, and propagating the changes upward
+ * as necessary, plus some accounting needed to play nicely with other
+ * parts of the VM system.
+ * At each level, we keep one bit for each pair of blocks, which
+ * is set to 1 iff only one of the pair is allocated. So when we
+ * are allocating or freeing one, we can derive the state of the
+ * other. That is, if we allocate a small block, and both were
+ * free, the remainder of the region must be split into blocks.
+ * If a block is freed, and its buddy is also free, then this
+ * triggers coalescing into a block of larger size.
+ *
+ * -- wli
+ */
+
+static void FASTCALL(__free_pages_ok (struct page *page, unsigned int order));
+static void __free_pages_ok (struct page *page, unsigned int order)
+{
+ unsigned long index, page_idx, mask, flags;
+ free_area_t *area;
+ struct page *base;
+ zone_t *zone;
+
+ /*
+ * Subtle. We do not want to test this in the inlined part of
+ * __free_page() - it's a rare condition and just increases
+	 * cache footprint unnecessarily. So we do an 'incorrect'
+ * decrement on page->count for reserved pages, but this part
+ * makes it safe.
+ */
+ if (PageReserved(page))
+ return;
+
+ /*
+ * Yes, think what happens when other parts of the kernel take
+ * a reference to a page in order to pin it for io. -ben
+ */
+ if (PageLRU(page)) {
+ if (unlikely(in_interrupt()))
+ BUG();
+ lru_cache_del(page);
+ }
+
+ if (page->buffers)
+ BUG();
+ if (page->mapping)
+ BUG();
+ if (!VALID_PAGE(page))
+ BUG();
+ if (PageLocked(page))
+ BUG();
+ if (PageActive(page))
+ BUG();
+
+ page->flags &= ~((1<<PG_referenced) | (1<<PG_dirty));
+
+ /* de-reference all the pages for this order */
+ for (page_idx = 1; page_idx < (1 << order); page_idx++)
+ set_page_count(&page[page_idx], 0);
+
+ if (current->flags & PF_FREE_PAGES)
+ goto local_freelist;
+ back_local_freelist:
+
+ zone = page_zone(page);
+
+ mask = (~0UL) << order;
+ base = zone->zone_mem_map;
+ page_idx = page - base;
+ if (page_idx & ~mask)
+ BUG();
+ index = page_idx >> (1 + order);
+
+ area = zone->free_area + order;
+
+ spin_lock_irqsave(&zone->lock, flags);
+
+ zone->free_pages -= mask;
+
+ while (mask + (1 << (MAX_ORDER-1))) {
+ struct page *buddy1, *buddy2;
+
+ if (area >= zone->free_area + MAX_ORDER)
+ BUG();
+ if (!__test_and_change_bit(index, area->map))
+ /*
+ * the buddy page is still allocated.
+ */
+ break;
+ /*
+ * Move the buddy up one level.
+ * This code is taking advantage of the identity:
+ * -mask = 1+~mask
+ */
+ buddy1 = base + (page_idx ^ -mask);
+ buddy2 = base + page_idx;
+ if (BAD_RANGE(zone,buddy1))
+ BUG();
+ if (BAD_RANGE(zone,buddy2))
+ BUG();
+
+ list_del(&buddy1->list);
+ mask <<= 1;
+ area++;
+ index >>= 1;
+ page_idx &= mask;
+ }
+ list_add(&(base + page_idx)->list, &area->free_list);
+
+ spin_unlock_irqrestore(&zone->lock, flags);
+ return;
+
+ local_freelist:
+ if (current->nr_local_pages)
+ goto back_local_freelist;
+ if (in_interrupt())
+ goto back_local_freelist;
+
+ list_add(&page->list, &current->local_pages);
+ page->index = order;
+ current->nr_local_pages++;
+}
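The buddy arithmetic in the coalescing loop above is easiest to see with concrete numbers. A standalone sketch for an order-2 block starting at page index 8 (values in the comments are what the expressions evaluate to):

    unsigned int  order    = 2;
    unsigned long mask     = (~0UL) << order;        /* ...11111100                     */
    unsigned long page_idx = 8;                      /* order-aligned block start       */
    unsigned long buddy    = page_idx ^ -mask;       /* -mask == 1 << order == 4, so 12 */
    unsigned long merged   = page_idx & (mask << 1); /* 8: start of the order-3 block,
                                                        same as page_idx &= mask after
                                                        the loop does mask <<= 1        */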
+
+#define MARK_USED(index, order, area) \
+ __change_bit((index) >> (1+(order)), (area)->map)
+
+static inline struct page * expand (zone_t *zone, struct page *page,
+ unsigned long index, int low, int high, free_area_t * area)
+{
+ unsigned long size = 1 << high;
+
+ while (high > low) {
+ if (BAD_RANGE(zone,page))
+ BUG();
+ area--;
+ high--;
+ size >>= 1;
+ list_add(&(page)->list, &(area)->free_list);
+ MARK_USED(index, high, area);
+ index += size;
+ page += size;
+ }
+ if (BAD_RANGE(zone,page))
+ BUG();
+ return page;
+}
+
+static FASTCALL(struct page * rmqueue(zone_t *zone, unsigned int order));
+static struct page * rmqueue(zone_t *zone, unsigned int order)
+{
+ free_area_t * area = zone->free_area + order;
+ unsigned int curr_order = order;
+ struct list_head *head, *curr;
+ unsigned long flags;
+ struct page *page;
+ int i;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ do {
+ head = &area->free_list;
+ curr = head->next;
+
+ if (curr != head) {
+ unsigned int index;
+
+ page = list_entry(curr, struct page, list);
+ if (BAD_RANGE(zone,page))
+ BUG();
+ list_del(curr);
+ index = page - zone->zone_mem_map;
+ if (curr_order != MAX_ORDER-1)
+ MARK_USED(index, curr_order, area);
+ zone->free_pages -= 1UL << order;
+
+ page = expand(zone, page, index, order, curr_order, area);
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+ set_page_count(page, 1);
+ if (BAD_RANGE(zone,page))
+ BUG();
+ if (PageLRU(page))
+ BUG();
+ if (PageActive(page))
+ BUG();
+
+ /*
+ * we need to reference all the pages for this order,
+ * otherwise if anyone accesses one of the pages with
+			 * get/put it will be freed :-(
+ */
+ for (i = 1; i < (1 << order); i++)
+ set_page_count(&page[i], 1);
+
+ return page;
+ }
+ curr_order++;
+ area++;
+ } while (curr_order < MAX_ORDER);
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+ return NULL;
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+struct page *_alloc_pages(unsigned int gfp_mask, unsigned int order)
+{
+ return __alloc_pages(gfp_mask, order,
+ contig_page_data.node_zonelists+(gfp_mask & GFP_ZONEMASK));
+}
+#endif
+
+static struct page * FASTCALL(balance_classzone(zone_t *, unsigned int, unsigned int, int *));
+static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask, unsigned int order, int * freed)
+{
+ struct page * page = NULL;
+ int __freed = 0, i;
+
+ if (!(gfp_mask & __GFP_WAIT))
+ goto out;
+ if (in_interrupt())
+ BUG();
+
+ current->allocation_order = order;
+ current->flags |= PF_MEMALLOC | PF_FREE_PAGES;
+
+ __freed = try_to_free_pages_zone(classzone, gfp_mask);
+
+ current->flags &= ~(PF_MEMALLOC | PF_FREE_PAGES);
+
+ if (current->nr_local_pages) {
+ struct list_head * entry, * local_pages;
+ struct page * tmp;
+ int nr_pages;
+
+ local_pages = &current->local_pages;
+
+ if (likely(__freed)) {
+ /* pick from the last inserted so we're lifo */
+ entry = local_pages->next;
+ do {
+ tmp = list_entry(entry, struct page, list);
+ if (tmp->index == order && memclass(page_zone(tmp), classzone)) {
+ list_del(entry);
+ current->nr_local_pages--;
+ set_page_count(tmp, 1);
+
+ page = tmp;
+
+ /*
+ * we need to reference all the pages for this order,
+ * otherwise if anyone accesses one of the pages with
+					 * get/put it will be freed :-(
+ */
+ for (i = 1; i < (1 << order); i++)
+ set_page_count(&page[i], 1);
+
+ if (page->buffers)
+ BUG();
+ if (page->mapping)
+ BUG();
+ if (!VALID_PAGE(page))
+ BUG();
+ if (PageLocked(page))
+ BUG();
+ if (PageLRU(page))
+ BUG();
+ if (PageActive(page))
+ BUG();
+ if (PageDirty(page))
+ BUG();
+
+ break;
+ }
+ } while ((entry = entry->next) != local_pages);
+ }
+
+ nr_pages = current->nr_local_pages;
+ /* free in reverse order so that the global order will be lifo */
+ while ((entry = local_pages->prev) != local_pages) {
+ list_del(entry);
+ tmp = list_entry(entry, struct page, list);
+ __free_pages_ok(tmp, tmp->index);
+ if (!nr_pages--)
+ BUG();
+ }
+ current->nr_local_pages = 0;
+ }
+ out:
+ *freed = __freed;
+ return page;
+}
+
+/*
+ * This is the 'heart' of the zoned buddy allocator:
+ */
+struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist)
+{
+ unsigned long min;
+ zone_t **zone, * classzone;
+ struct page * page;
+ int freed;
+
+ zone = zonelist->zones;
+ classzone = *zone;
+ if (classzone == NULL)
+ return NULL;
+ min = 1UL << order;
+ for (;;) {
+ zone_t *z = *(zone++);
+ if (!z)
+ break;
+
+ min += z->pages_low;
+ if (z->free_pages > min) {
+ page = rmqueue(z, order);
+ if (page)
+ return page;
+ }
+ }
+
+ classzone->need_balance = 1;
+ mb();
+ if (waitqueue_active(&kswapd_wait))
+ wake_up_interruptible(&kswapd_wait);
+
+ zone = zonelist->zones;
+ min = 1UL << order;
+ for (;;) {
+ unsigned long local_min;
+ zone_t *z = *(zone++);
+ if (!z)
+ break;
+
+ local_min = z->pages_min;
+ if (!(gfp_mask & __GFP_WAIT))
+ local_min >>= 2;
+ min += local_min;
+ if (z->free_pages > min) {
+ page = rmqueue(z, order);
+ if (page)
+ return page;
+ }
+ }
+
+ /* here we're in the low on memory slow path */
+
+rebalance:
+ if (current->flags & (PF_MEMALLOC | PF_MEMDIE)) {
+ zone = zonelist->zones;
+ for (;;) {
+ zone_t *z = *(zone++);
+ if (!z)
+ break;
+
+ page = rmqueue(z, order);
+ if (page)
+ return page;
+ }
+ return NULL;
+ }
+
+ /* Atomic allocations - we can't balance anything */
+ if (!(gfp_mask & __GFP_WAIT))
+ return NULL;
+
+ page = balance_classzone(classzone, gfp_mask, order, &freed);
+ if (page)
+ return page;
+
+ zone = zonelist->zones;
+ min = 1UL << order;
+ for (;;) {
+ zone_t *z = *(zone++);
+ if (!z)
+ break;
+
+ min += z->pages_min;
+ if (z->free_pages > min) {
+ page = rmqueue(z, order);
+ if (page)
+ return page;
+ }
+ }
+
+ /* Don't let big-order allocations loop */
+ if (order > 3)
+ return NULL;
+
+ /* Yield for kswapd, and try again */
+ yield();
+ goto rebalance;
+}
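The first zone walk above accumulates pages_low watermarks as it goes, which is what makes lower (more precious) zones progressively harder to dip into. With illustrative numbers for an order-0 request against a { Normal, DMA } zonelist: min starts at 1; after adding Normal's pages_low of 255 the threshold is 256, so Normal is used only while it holds more than 256 free pages; failing that, DMA with pages_low of 50 must hold more than 306 free pages before it is borrowed from. If every zone fails this pass, kswapd is woken and the stricter pages_min-based passes below are tried.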
+
+/*
+ * Common helper functions.
+ */
+unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
+{
+ struct page * page;
+
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
+ return 0;
+ return (unsigned long) page_address(page);
+}
+
+unsigned long get_zeroed_page(unsigned int gfp_mask)
+{
+ struct page * page;
+
+ page = alloc_pages(gfp_mask, 0);
+ if (page) {
+ void *address = page_address(page);
+ clear_page(address);
+ return (unsigned long) address;
+ }
+ return 0;
+}
+
+void __free_pages(struct page *page, unsigned int order)
+{
+ if (!PageReserved(page) && put_page_testzero(page))
+ __free_pages_ok(page, order);
+}
+
+void free_pages(unsigned long addr, unsigned int order)
+{
+ if (addr != 0)
+ __free_pages(virt_to_page(addr), order);
+}
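A minimal usage sketch of these helpers (buffer name and order are arbitrary): an allocation made with __get_free_pages() at a given order must be returned with free_pages() at the same order.

    unsigned long buf = __get_free_pages(GFP_KERNEL, 1);   /* 2 contiguous pages */
    if (!buf)
            return -ENOMEM;
    /* ... use the two-page buffer ... */
    free_pages(buf, 1);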
+
+/*
+ * Total amount of free (allocatable) RAM:
+ */
+unsigned int nr_free_pages (void)
+{
+ unsigned int sum = 0;
+ zone_t *zone;
+
+ for_each_zone(zone)
+ sum += zone->free_pages;
+
+ return sum;
+}
+
+/*
+ * Amount of free RAM allocatable as buffer memory:
+ */
+unsigned int nr_free_buffer_pages (void)
+{
+ pg_data_t *pgdat;
+ unsigned int sum = 0;
+
+ for_each_pgdat(pgdat) {
+ zonelist_t *zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
+ zone_t **zonep = zonelist->zones;
+ zone_t *zone;
+
+ for (zone = *zonep++; zone; zone = *zonep++) {
+ unsigned long size = zone->size;
+ unsigned long high = zone->pages_high;
+ if (size > high)
+ sum += size - high;
+ }
+ }
+
+ return sum;
+}
+
+#if CONFIG_HIGHMEM
+unsigned int nr_free_highpages (void)
+{
+ pg_data_t *pgdat;
+ unsigned int pages = 0;
+
+ for_each_pgdat(pgdat)
+ pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+
+ return pages;
+}
+#endif
+
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
+/*
+ * Show free area list (used inside shift_scroll-lock stuff)
+ * We also calculate the percentage fragmentation. We do this by counting the
+ * memory on each free list with the exception of the first item on the list.
+ */
+void show_free_areas_core(pg_data_t *pgdat)
+{
+ unsigned int order;
+ unsigned type;
+ pg_data_t *tmpdat = pgdat;
+
+ printk("Free pages: %6dkB (%6dkB HighMem)\n",
+ K(nr_free_pages()),
+ K(nr_free_highpages()));
+
+ while (tmpdat) {
+ zone_t *zone;
+ for (zone = tmpdat->node_zones;
+ zone < tmpdat->node_zones + MAX_NR_ZONES; zone++)
+ printk("Zone:%s freepages:%6lukB min:%6lukB low:%6lukB "
+ "high:%6lukB\n",
+ zone->name,
+ K(zone->free_pages),
+ K(zone->pages_min),
+ K(zone->pages_low),
+ K(zone->pages_high));
+
+ tmpdat = tmpdat->node_next;
+ }
+
+ printk("( Active: %d, inactive: %d, free: %d )\n",
+ nr_active_pages,
+ nr_inactive_pages,
+ nr_free_pages());
+
+ for (type = 0; type < MAX_NR_ZONES; type++) {
+ struct list_head *head, *curr;
+ zone_t *zone = pgdat->node_zones + type;
+ unsigned long nr, total, flags;
+
+ total = 0;
+ if (zone->size) {
+ spin_lock_irqsave(&zone->lock, flags);
+ for (order = 0; order < MAX_ORDER; order++) {
+ head = &(zone->free_area + order)->free_list;
+ curr = head;
+ nr = 0;
+ for (;;) {
+ if ((curr = curr->next) == head)
+ break;
+ nr++;
+ }
+ total += nr * (1 << order);
+ printk("%lu*%lukB ", nr, K(1UL) << order);
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+ printk("= %lukB)\n", K(total));
+ }
+
+#ifndef NO_MM
+#ifdef SWAP_CACHE_INFO
+ show_swap_cache_info();
+#endif
+#endif
+}
+
+void show_free_areas(void)
+{
+ show_free_areas_core(pgdat_list);
+}
+
+/*
+ * Builds allocation fallback zone lists.
+ */
+static inline void build_zonelists(pg_data_t *pgdat)
+{
+ int i, j, k;
+
+ for (i = 0; i <= GFP_ZONEMASK; i++) {
+ zonelist_t *zonelist;
+ zone_t *zone;
+
+ zonelist = pgdat->node_zonelists + i;
+ memset(zonelist, 0, sizeof(*zonelist));
+
+ j = 0;
+ k = ZONE_NORMAL;
+ if (i & __GFP_HIGHMEM)
+ k = ZONE_HIGHMEM;
+ if (i & __GFP_DMA)
+ k = ZONE_DMA;
+
+ switch (k) {
+ default:
+ BUG();
+ /*
+ * fallthrough:
+ */
+ case ZONE_HIGHMEM:
+ zone = pgdat->node_zones + ZONE_HIGHMEM;
+ if (zone->size) {
+#ifndef CONFIG_HIGHMEM
+ BUG();
+#endif
+ zonelist->zones[j++] = zone;
+ }
+ case ZONE_NORMAL:
+ zone = pgdat->node_zones + ZONE_NORMAL;
+ if (zone->size)
+ zonelist->zones[j++] = zone;
+ case ZONE_DMA:
+ zone = pgdat->node_zones + ZONE_DMA;
+ if (zone->size)
+ zonelist->zones[j++] = zone;
+ }
+ zonelist->zones[j++] = NULL;
+ }
+}
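The fall-through switch above encodes the fallback order directly: a __GFP_HIGHMEM request tries HighMem, then Normal, then DMA; a __GFP_DMA request is restricted to the DMA zone; everything else gets Normal followed by DMA. The following stand-alone sketch (not kernel code; the SKETCH_GFP_* bits are made up for the example) reproduces just that ordering decision:

#include <stdio.h>

#define SKETCH_GFP_DMA     0x01	/* stand-ins for the real __GFP_* bits */
#define SKETCH_GFP_HIGHMEM 0x02

/* print the fallback order the switch above would produce for a mask */
static void print_fallback(unsigned int mask)
{
	if (mask & SKETCH_GFP_DMA)
		printf("DMA\n");
	else if (mask & SKETCH_GFP_HIGHMEM)
		printf("HighMem -> Normal -> DMA\n");
	else
		printf("Normal -> DMA\n");
}

int main(void)
{
	print_fallback(0);
	print_fallback(SKETCH_GFP_HIGHMEM);
	print_fallback(SKETCH_GFP_DMA);
	return 0;
}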
+
+/*
+ * Helper functions to size the waitqueue hash table.
+ * Essentially these want to choose hash table sizes sufficiently
+ * large so that collisions trying to wait on pages are rare.
+ * But in fact, the number of active page waitqueues on typical
+ * systems is ridiculously low, less than 200. So this is even
+ * conservative, even though it seems large.
+ *
+ * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
+ * waitqueues, i.e. the size of the waitq table given the number of pages.
+ */
+#define PAGES_PER_WAITQUEUE 256
+
+static inline unsigned long wait_table_size(unsigned long pages)
+{
+ unsigned long size = 1;
+
+ pages /= PAGES_PER_WAITQUEUE;
+
+ while (size < pages)
+ size <<= 1;
+
+ /*
+ * Once we have dozens or even hundreds of threads sleeping
+ * on IO we've got bigger problems than wait queue collision.
+ * Limit the size of the wait table to a reasonable size.
+ */
+ size = min(size, 4096UL);
+
+ return size;
+}
+
+/*
+ * This is an integer logarithm so that shifts can be used later
+ * to extract the more random high bits from the multiplicative
+ * hash function before the remainder is taken.
+ */
+static inline unsigned long wait_table_bits(unsigned long size)
+{
+ return ffz(~size);
+}
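Taken together, the two helpers above size and index the hash: for a hypothetical zone of 32768 pages (128 MB with 4 kB pages), 32768/256 rounds up to a 128-entry table, and wait_table_bits(128) is 7, so wait_table_shift becomes BITS_PER_LONG - 7. A stand-alone user-space sketch of the same arithmetic (the zone size is an assumption for the example):

#include <stdio.h>

static unsigned long sketch_wait_table_size(unsigned long pages)
{
	unsigned long size = 1;

	pages /= 256;			/* PAGES_PER_WAITQUEUE */
	while (size < pages)
		size <<= 1;
	return size < 4096UL ? size : 4096UL;
}

/* log2 of a power-of-two size, i.e. what ffz(~size) computes above */
static unsigned long sketch_wait_table_bits(unsigned long size)
{
	unsigned long bits = 0;

	while (!(size & (1UL << bits)))
		bits++;
	return bits;
}

int main(void)
{
	unsigned long size = sketch_wait_table_size(32768);

	printf("wait table: %lu queues, hash shift %lu\n", size,
	       (unsigned long)(8 * sizeof(long)) - sketch_wait_table_bits(size));
	return 0;
}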
+
+#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
+
+/*
+ * Set up the zone data structures:
+ * - mark all pages reserved
+ * - mark all memory queues empty
+ * - clear the memory bitmaps
+ */
+void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
+ unsigned long *zones_size, unsigned long zone_start_paddr,
+ unsigned long *zholes_size, struct page *lmem_map)
+{
+ unsigned long i, j;
+ unsigned long map_size;
+ unsigned long totalpages, offset, realtotalpages;
+ const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
+
+ if (zone_start_paddr & ~PAGE_MASK)
+ BUG();
+
+ totalpages = 0;
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ unsigned long size = zones_size[i];
+ totalpages += size;
+ }
+ realtotalpages = totalpages;
+ if (zholes_size)
+ for (i = 0; i < MAX_NR_ZONES; i++)
+ realtotalpages -= zholes_size[i];
+
+ printk("On node %d totalpages: %lu\n", nid, realtotalpages);
+
+ /*
+	 * Some architectures (with lots of mem and discontiguous memory
+ * maps) have to search for a good mem_map area:
+ * For discontigmem, the conceptual mem map array starts from
+ * PAGE_OFFSET, we need to align the actual array onto a mem map
+ * boundary, so that MAP_NR works.
+ */
+ map_size = (totalpages + 1)*sizeof(struct page);
+ if (lmem_map == (struct page *)0) {
+ lmem_map = (struct page *) alloc_bootmem_node(pgdat, map_size);
+ lmem_map = (struct page *)(PAGE_OFFSET +
+ MAP_ALIGN((unsigned long)lmem_map - PAGE_OFFSET));
+ }
+ *gmap = pgdat->node_mem_map = lmem_map;
+ pgdat->node_size = totalpages;
+ pgdat->node_start_paddr = zone_start_paddr;
+ pgdat->node_start_mapnr = (lmem_map - mem_map);
+ pgdat->nr_zones = 0;
+
+ offset = lmem_map - mem_map;
+ for (j = 0; j < MAX_NR_ZONES; j++) {
+ zone_t *zone = pgdat->node_zones + j;
+ unsigned long mask;
+ unsigned long size, realsize;
+
+ zone_table[nid * MAX_NR_ZONES + j] = zone;
+ realsize = size = zones_size[j];
+ if (zholes_size)
+ realsize -= zholes_size[j];
+
+ printk("zone(%lu): %lu pages.\n", j, size);
+ zone->size = size;
+ zone->name = zone_names[j];
+ zone->lock = SPIN_LOCK_UNLOCKED;
+ zone->zone_pgdat = pgdat;
+ zone->free_pages = 0;
+ zone->need_balance = 0;
+ if (!size)
+ continue;
+
+ /*
+ * The per-page waitqueue mechanism uses hashed waitqueues
+ * per zone.
+ */
+ zone->wait_table_size = wait_table_size(size);
+ zone->wait_table_shift =
+ BITS_PER_LONG - wait_table_bits(zone->wait_table_size);
+ zone->wait_table = (wait_queue_head_t *)
+ alloc_bootmem_node(pgdat, zone->wait_table_size
+ * sizeof(wait_queue_head_t));
+
+ for(i = 0; i < zone->wait_table_size; ++i)
+ init_waitqueue_head(zone->wait_table + i);
+
+ pgdat->nr_zones = j+1;
+
+ mask = (realsize / zone_balance_ratio[j]);
+ if (mask < zone_balance_min[j])
+ mask = zone_balance_min[j];
+ else if (mask > zone_balance_max[j])
+ mask = zone_balance_max[j];
+ zone->pages_min = mask;
+ zone->pages_low = mask*2;
+ zone->pages_high = mask*3;
+
+ zone->zone_mem_map = mem_map + offset;
+ zone->zone_start_mapnr = offset;
+ zone->zone_start_paddr = zone_start_paddr;
+
+ if ((zone_start_paddr >> PAGE_SHIFT) & (zone_required_alignment-1))
+ printk("BUG: wrong zone alignment, it will crash\n");
+
+ /*
+ * Initially all pages are reserved - free ones are freed
+ * up by free_all_bootmem() once the early boot process is
+ * done. Non-atomic initialization, single-pass.
+ */
+ for (i = 0; i < size; i++) {
+ struct page *page = mem_map + offset + i;
+ set_page_zone(page, nid * MAX_NR_ZONES + j);
+ set_page_count(page, 0);
+ SetPageReserved(page);
+ INIT_LIST_HEAD(&page->list);
+ if (j != ZONE_HIGHMEM)
+ set_page_address(page, __va(zone_start_paddr));
+ zone_start_paddr += PAGE_SIZE;
+ }
+
+ offset += size;
+ for (i = 0; ; i++) {
+ unsigned long bitmap_size;
+
+ INIT_LIST_HEAD(&zone->free_area[i].free_list);
+ if (i == MAX_ORDER-1) {
+ zone->free_area[i].map = NULL;
+ break;
+ }
+
+ /*
+ * Page buddy system uses "index >> (i+1)",
+ * where "index" is at most "size-1".
+ *
+ * The extra "+3" is to round down to byte
+ * size (8 bits per byte assumption). Thus
+ * we get "(size-1) >> (i+4)" as the last byte
+ * we can access.
+ *
+ * The "+1" is because we want to round the
+ * byte allocation up rather than down. So
+ * we should have had a "+7" before we shifted
+ * down by three. Also, we have to add one as
+ * we actually _use_ the last bit (it's [0,n]
+ * inclusive, not [0,n[).
+ *
+ * So we actually had +7+1 before we shift
+ * down by 3. But (n+8) >> 3 == (n >> 3) + 1
+ * (modulo overflows, which we do not have).
+ *
+ * Finally, we LONG_ALIGN because all bitmap
+ * operations are on longs.
+ */
+ bitmap_size = (size-1) >> (i+4);
+ bitmap_size = LONG_ALIGN(bitmap_size+1);
+ zone->free_area[i].map =
+ (unsigned long *) alloc_bootmem_node(pgdat, bitmap_size);
+ }
+ }
+ build_zonelists(pgdat);
+}
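To make the bitmap arithmetic from the long comment above concrete: for a hypothetical 4096-page zone, order 0 needs LONG_ALIGN((4095 >> 4) + 1) = 256 bytes of bitmap, order 1 needs 128, and so on, halving each time. A stand-alone sketch of that calculation (the zone size is assumed for the example):

#include <stdio.h>

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

int main(void)
{
	unsigned long size = 4096;		/* pages in the zone (assumed) */
	unsigned long order;

	for (order = 0; order < 5; order++) {
		unsigned long bitmap_size = (size - 1) >> (order + 4);

		bitmap_size = LONG_ALIGN(bitmap_size + 1);
		printf("order %lu: %lu bytes of bitmap\n", order, bitmap_size);
	}
	return 0;
}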
+
+void __init free_area_init(unsigned long *zones_size)
+{
+ free_area_init_core(0, &contig_page_data, &mem_map, zones_size, 0, 0, 0);
+}
+
+static int __init setup_mem_frac(char *str)
+{
+ int j = 0;
+
+ while (get_option(&str, &zone_balance_ratio[j++]) == 2);
+ printk("setup_mem_frac: ");
+ for (j = 0; j < MAX_NR_ZONES; j++) printk("%d ", zone_balance_ratio[j]);
+ printk("\n");
+ return 1;
+}
+
+__setup("memfrac=", setup_mem_frac);
diff --git a/uClinux-2.4.20-uc1/mmnommu/page_alloc2.c b/uClinux-2.4.20-uc1/mmnommu/page_alloc2.c
new file mode 100644
index 0000000..3c9fb8c
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/page_alloc2.c
@@ -0,0 +1,1015 @@
+/****************************************************************************/
+/*
+ * linux/mmnommu/page_alloc2.c
+ *
+ * Copyright (C) 2001, 2002 David McCullough <davidm@snapgear.com>
+ *
+ * A page allocator that attempts to be better than the
+ * standard power of 2 allocator.
+ *
+ * Based on page_alloc.c, see credits in that file.
+ *
+ */
+/****************************************************************************/
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/swapctl.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+/****************************************************************************/
+/*
+ * do we want nasty stuff checking enabled
+ */
+
+#if 0
+#define SADISTIC_PAGE_ALLOC 1
+#endif
+
+/*
+ * Some accounting stuff
+ */
+extern unsigned long askedalloc, realalloc;
+
+int nr_swap_pages;
+int nr_active_pages;
+int nr_inactive_pages;
+struct list_head inactive_list;
+struct list_head active_list;
+pg_data_t *pgdat_list;
+
+#define memlist_init(x) INIT_LIST_HEAD(x)
+#define memlist_add_head list_add
+#define memlist_add_tail list_add_tail
+#define memlist_del list_del
+#define memlist_entry list_entry
+#define memlist_next(x) ((x)->next)
+#define memlist_prev(x) ((x)->prev)
+
+zone_t *zone_table[MAX_NR_ZONES*MAX_NR_NODES];
+EXPORT_SYMBOL(zone_table);
+
+static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
+
+/*
+ * A simple method to save us searching all the reserved kernel
+ * pages every time is to remember where the first free page is
+ */
+
+static char *bit_map = NULL;
+static int bit_map_size = 0;
+static int first_usable_page = 0;
+static int _nr_free_pages = 0;
+
+unsigned int nr_free_pages() { return _nr_free_pages; }
+
+extern struct wait_queue *buffer_wait;
+
+#define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
+
+/****************************************************************************/
+
+#if 1
+#define DBG_ALLOC(fmt...)
+#else
+#define DBG_ALLOC(fmt...)	printk(fmt)
+#endif
+
+extern unsigned long __get_contiguous_pages(unsigned int gfp_mask,
+ unsigned long num_adjpages, unsigned int align_order);
+static void find_some_memory(int n);
+
+#ifdef CONFIG_MEM_MAP
+static int mem_map_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+#endif
+
+#ifdef __mc68000__
+#define ALIGN_ORDER(x) 0
+#else
+#define ALIGN_ORDER(x) ((x) == 1 ? 1 : 0)
+#endif
+
+/*
+ * The number of pages that constitute a small allocation that is located
+ * at the top of memory
+ */
+#define SMALL_ALLOC_PAGES 2
+
+/****************************************************************************/
+#ifdef SADISTIC_PAGE_ALLOC
+
+static void
+mem_set(unsigned char *p, unsigned char value, int n)
+{
+ while (n-- > 0)
+ *p++ = value;
+}
+
+static void
+mem_test(unsigned char *p, unsigned char value, int n)
+{
+ while (n-- > 0)
+ if (*p++ != value)
+ break;
+ if (n >= 0)
+ printk("free memory changed 0x%x, 0x%x != 0x%x\n",
+ (unsigned int) p - 1, *(p - 1), value);
+}
+
+#endif
+/****************************************************************************/
+
+void free_contiguous_pages(unsigned long addr, unsigned int num_adjpages)
+{
+ unsigned long map_nr = MAP_NR(addr);
+ unsigned long flags;
+
+ DBG_ALLOC("%s,%d: %s(0x%x, %d)\n", __FILE__, __LINE__, __FUNCTION__,
+ addr, num_adjpages);
+ if (map_nr < bit_map_size) {
+ int freed = 0;
+ mem_map_t *p, *ep;
+
+ p = mem_map + map_nr;
+
+ save_flags(flags);
+ cli();
+
+ if (!put_page_testzero(p)) {
+ restore_flags(flags);
+ return;
+ }
+
+ if (PageReserved(p)) /* we never hand out reserved pages */
+ BUG();
+ if (PageLRU(p))
+ lru_cache_del(p);
+
+
+ if (p->buffers)
+ BUG();
+ if (p->mapping)
+ BUG();
+ if (!VALID_PAGE(p))
+ BUG();
+ if (PageLocked(p))
+ BUG();
+ if (PageActive(p))
+ BUG();
+
+ for (ep = p + num_adjpages; p < ep; p++) {
+ p->flags &= ~((1<<PG_referenced) | (1<<PG_dirty));
+#ifdef SADISTIC_PAGE_ALLOC
+ mem_set((char *) page_address(p), 0xdd, PAGE_SIZE);
+#endif
+ if (p-mem_map < first_usable_page)
+ first_usable_page = p-mem_map;
+ clear_bit(p-mem_map, bit_map);
+ set_page_count(p, 0);
+ freed++;
+ _nr_free_pages++;
+ }
+ restore_flags(flags);
+
+ if (waitqueue_active(&kswapd_wait))
+ wake_up_interruptible(&kswapd_wait);
+ }
+}
+
+/****************************************************************************/
+/*
+ * Amount of free RAM allocatable as buffer memory:
+ */
+
+unsigned int nr_free_buffer_pages (void)
+{
+ return _nr_free_pages + nr_active_pages + nr_inactive_pages;
+}
+
+/****************************************************************************/
+/*
+ * We have to keep this interface as some parts of the kernel
+ * source reference them directly
+ */
+
+unsigned long get_zeroed_page(unsigned int gfp_mask)
+{
+ struct page * page;
+
+ page = alloc_pages(gfp_mask, 0);
+ if (page) {
+ void *address = page_address(page);
+ clear_page(address);
+ return (unsigned long) address;
+ }
+ return 0;
+}
+
+void free_pages(unsigned long addr, unsigned int order)
+{
+ DBG_ALLOC("%s,%d: %s(0x%x, %d)\n", __FILE__, __LINE__, __FUNCTION__,
+ addr, order);
+ if (addr != 0)
+ __free_pages(virt_to_page(addr), order);
+}
+
+void __free_pages(struct page *page, unsigned int order)
+{
+ DBG_ALLOC("%s,%d: %s(0x%x[0x%x], %d)\n", __FILE__, __LINE__,
+ __FUNCTION__, page, page_address(page), order);
+
+ if (!PageReserved(page))
+ free_contiguous_pages((unsigned long) page_address(page), 1 << order);
+}
+
+struct page *_alloc_pages(unsigned int gfp_mask, unsigned int order)
+{
+ unsigned long addr;
+ DBG_ALLOC("%s,%d: %s(0x%x, %d)\n", __FILE__, __LINE__, __FUNCTION__,
+ gfp_mask, order);
+ addr = __get_contiguous_pages(gfp_mask, 1 << order, ALIGN_ORDER(order));
+ if (addr)
+ return(virt_to_page(addr));
+ return(NULL);
+}
+
+struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist)
+{
+ unsigned long addr;
+ DBG_ALLOC("%s,%d: %s(0x%x, %d)\n", __FILE__, __LINE__, __FUNCTION__,
+ gfp_mask, order);
+ addr = __get_contiguous_pages(gfp_mask, 1 << order, ALIGN_ORDER(order));
+ if (addr)
+ return(virt_to_page(addr));
+ return(NULL);
+}
+
+/****************************************************************************/
+
+static void find_some_memory(int n)
+{
+ int loops = 0, i;
+ pg_data_t * pgdat;
+ zone_t * zone;
+
+ if (in_interrupt()) /* sorry, you lose */
+ return;
+
+ do {
+ pgdat = pgdat_list;
+ do {
+ for (i = pgdat->nr_zones-1; i >= 0; i--) {
+ zone = pgdat->node_zones + i;
+ zone->need_balance = 1;
+ try_to_free_pages_zone(zone, GFP_KSWAPD);
+ }
+ } while ((pgdat = pgdat->node_next));
+ } while (loops++ < n);
+}
+
+/****************************************************************************/
+/*
+ * look through the map for a run of consecutive pages that will
+ * hold a # of pages
+ */
+
+unsigned long
+__get_contiguous_pages(
+ unsigned int gfp_mask,
+ unsigned long num_adjpages,
+ unsigned int align_order)
+{
+ unsigned long flags;
+ mem_map_t *p;
+ int repeats = 0;
+ pg_data_t *pgdat;
+ zone_t *zone;
+
+ DBG_ALLOC("%s,%d: %s(0x%x, %d, %d) - mem_map=0x%x\n", __FILE__, __LINE__,
+ __FUNCTION__, gfp_mask, num_adjpages, align_order, mem_map);
+ save_flags(flags);
+
+ if (waitqueue_active(&kswapd_wait))
+ wake_up_interruptible(&kswapd_wait);
+
+repeat:
+ cli();
+/*
+ * Don't bother trying to find pages unless there are enough
+ * for the given context
+ */
+ if (num_adjpages <= _nr_free_pages) {
+
+ int n = 0, little_alloc = 0, ff;
+
+ p = NULL;
+ if (num_adjpages <= SMALL_ALLOC_PAGES)
+ little_alloc = bit_map_size;
+
+ ff = find_next_zero_bit(bit_map, bit_map_size,
+ num_adjpages <= SMALL_ALLOC_PAGES ? (little_alloc -= 16) :
+ first_usable_page);
+
+ while (ff + num_adjpages <= bit_map_size || little_alloc > 0) {
+ if (ff + num_adjpages <= bit_map_size) {
+ p = mem_map + ff;
+ if (((unsigned long) page_address(p)) &
+ ((PAGE_SIZE << align_order) - 1))
+ n = 0;
+ else
+ for (n = 0; n < num_adjpages; n++, p++) {
+ if (test_bit(p-mem_map, bit_map))
+ break;
+#if 0
+ if (dma && !PageDMA(p))
+ break;
+#endif
+ }
+ if (n >= num_adjpages)
+ break;
+ }
+ ff = find_next_zero_bit(bit_map, bit_map_size,
+ num_adjpages <= SMALL_ALLOC_PAGES ? (little_alloc -= 16) :
+ (ff + n + 1));
+ }
+
+ if (p && n >= num_adjpages) {
+ _nr_free_pages -= num_adjpages;
+ while (n-- > 0) {
+ p--;
+#ifdef SADISTIC_PAGE_ALLOC
+ if (atomic_read(&p->count))
+ printk("allocated a non-free page\n");
+#endif
+ set_page_count(p, 1);
+ set_bit(p-mem_map, bit_map);
+ p->index = 0xa1c20000 | num_adjpages;
+ if (num_adjpages > 0xffff)
+ BUG();
+ }
+#ifdef SADISTIC_PAGE_ALLOC
+ mem_test((char *) page_address(p), 0xdd, num_adjpages * PAGE_SIZE);
+ mem_set((char *) page_address(p), 0xcc, num_adjpages * PAGE_SIZE);
+#endif
+ DBG_ALLOC(" return(0x%x[p=0x%x])\n", page_address(p), p);
+ pgdat = pgdat_list;
+ do { /* try and keep memory freed */
+ int i;
+ for (i = pgdat->nr_zones-1; i >= 0; i--) {
+ zone = pgdat->node_zones + i;
+ zone->need_balance = 1;
+ }
+ } while ((pgdat = pgdat->node_next));
+ restore_flags(flags);
+ return((unsigned long) page_address(p));
+ }
+ }
+ restore_flags(flags);
+ if ((current->flags & PF_MEMALLOC) == 0) {
+ find_some_memory(3);
+ if (repeats++ < 3)
+ goto repeat;
+ printk("%s: allocation of %d pages failed!\n", current->comm,
+ (int) num_adjpages);
+#ifdef CONFIG_MEM_MAP
+ mem_map_read_proc(NULL, NULL, 0, 0, 0, 0);
+#endif
+ out_of_memory(); /* call this too often and we panic */
+ }
+ return(0);
+}
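Stripped of the small-allocation heuristic, the alignment check, and the interrupt masking, the scan above is a first-fit search for a run of clear bits in the page bitmap. A simplified stand-alone sketch of that core idea (the array and sizes are invented for the example, and one byte per page is used instead of one bit for readability):

#define NPAGES 1024
static unsigned char page_used[NPAGES];	/* 0 = free, 1 = allocated */

/* return the first index of a run of 'n' free pages, or -1 if none exists */
static long sketch_find_free_run(unsigned long n)
{
	unsigned long i, run = 0;

	for (i = 0; i < NPAGES; i++) {
		run = page_used[i] ? 0 : run + 1;
		if (run == n)
			return (long)(i - n + 1);
	}
	return -1;
}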
+
+/****************************************************************************/
+/*
+ * as for free_pages, we have to provide this one as well
+ */
+
+unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
+{
+ DBG_ALLOC("%s,%d: %s(0x%x, %d)\n", __FILE__, __LINE__, __FUNCTION__,
+ gfp_mask, order);
+ return(__get_contiguous_pages(gfp_mask, 1 << order, ALIGN_ORDER(order)));
+}
+
+/****************************************************************************/
+/*
+ * dump some stats on how we are doing
+ */
+
+#define PRINTK(a...) (buffer ? (len+=sprintf(buffer+len, a)) : printk(a))
+#define FIXUP(t) if (buffer && len >= count - 80) goto t; else
+
+static int
+print_free_areas(char *buffer, int count)
+{
+ int len = 0;
+ mem_map_t *p, *ep;
+ unsigned long flags, slack;
+ unsigned long min_free = bit_map_size * PAGE_SIZE;
+ unsigned long min_used = bit_map_size * PAGE_SIZE;
+ unsigned long max_free=0, avg_free=0, free_blks=0;
+ unsigned long max_used=0, avg_used=0, used_blks=0;
+
+ find_some_memory(1);
+
+ if (realalloc)
+ slack = (realalloc-askedalloc) * 100 / realalloc;
+ else
+ slack = 0;
+
+ save_flags(flags);
+ cli();
+
+ FIXUP(got_data);
+
+ for (p = mem_map, ep = p + bit_map_size; p < ep; ) {
+ int n;
+
+ n = 0;
+
+ if (test_bit(p-mem_map, bit_map)) {
+ while (p < ep && test_bit(p-mem_map, bit_map)) {
+ n++;
+ p++;
+ }
+ avg_used += n;
+ if (n < min_used)
+ min_used = n;
+ if (n > max_used)
+ max_used = n;
+ used_blks++;
+ } else {
+ while (p < ep && !test_bit(p-mem_map, bit_map)) {
+ n++;
+ p++;
+ }
+ avg_free += n;
+ if (n < min_free)
+ min_free = n;
+ if (n > max_free)
+ max_free = n;
+ free_blks++;
+ }
+ }
+
+ PRINTK("Active: %d, inactive: %d, free: %d\n",
+ nr_active_pages, nr_inactive_pages, nr_free_pages());
+ FIXUP(got_data);
+ PRINTK("Free pages:%8d (%dkB), %%%lu frag, %%%lu slack\n",
+ _nr_free_pages, _nr_free_pages << (PAGE_SHIFT-10),
+ (free_blks * 100) / _nr_free_pages, slack);
+ FIXUP(got_data);
+ PRINTK("Free blks: %8lu min=%lu max=%lu avg=%lu\n",
+ free_blks, min_free, max_free, avg_free / free_blks);
+ FIXUP(got_data);
+ PRINTK("Used blks: %8lu min=%lu max=%lu avg=%lu\n",
+ used_blks, min_used, max_used, avg_used / used_blks);
+ FIXUP(got_data);
+
+got_data:
+ restore_flags(flags);
+ return(len);
+}
+
+#undef FIXUP
+#undef PRINTK
+/****************************************************************************/
+
+void
+show_free_areas(void)
+{
+ (void) print_free_areas(NULL, 0);
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_MEM_MAP)
+ (void) mem_map_read_proc(NULL, NULL, 0, 0, NULL, NULL);
+#endif
+}
+
+/****************************************************************************/
+
+/*
+ * Builds allocation fallback zone lists.
+ */
+static inline void build_zonelists(pg_data_t *pgdat)
+{
+ int i, j, k;
+
+ for (i = 0; i <= GFP_ZONEMASK; i++) {
+ zonelist_t *zonelist;
+ zone_t *zone;
+
+ zonelist = pgdat->node_zonelists + i;
+ memset(zonelist, 0, sizeof(*zonelist));
+
+ j = 0;
+ k = ZONE_NORMAL;
+ if (i & __GFP_HIGHMEM)
+ k = ZONE_HIGHMEM;
+ if (i & __GFP_DMA)
+ k = ZONE_DMA;
+
+ switch (k) {
+ default:
+ BUG();
+ /*
+ * fallthrough:
+ */
+ case ZONE_HIGHMEM:
+ zone = pgdat->node_zones + ZONE_HIGHMEM;
+ if (zone->size) {
+#ifndef CONFIG_HIGHMEM
+ BUG();
+#endif
+ zonelist->zones[j++] = zone;
+ }
+ case ZONE_NORMAL:
+ zone = pgdat->node_zones + ZONE_NORMAL;
+ if (zone->size)
+ zonelist->zones[j++] = zone;
+ case ZONE_DMA:
+ zone = pgdat->node_zones + ZONE_DMA;
+ if (zone->size)
+ zonelist->zones[j++] = zone;
+ }
+ zonelist->zones[j++] = NULL;
+ }
+}
+
+/****************************************************************************/
+/*
+ * Helper functions to size the waitqueue hash table.
+ * Essentially these want to choose hash table sizes sufficiently
+ * large so that collisions trying to wait on pages are rare.
+ * But in fact, the number of active page waitqueues on typical
+ * systems is ridiculously low, less than 200. So this is even
+ * conservative, even though it seems large.
+ *
+ * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
+ * waitqueues, i.e. the size of the waitq table given the number of pages.
+ */
+
+#define PAGES_PER_WAITQUEUE 256
+
+static inline unsigned long wait_table_size(unsigned long pages)
+{
+ unsigned long size = 1;
+
+ pages /= PAGES_PER_WAITQUEUE;
+
+ while (size < pages)
+ size <<= 1;
+
+ /*
+ * Once we have dozens or even hundreds of threads sleeping
+ * on IO we've got bigger problems than wait queue collision.
+ * Limit the size of the wait table to a reasonable size.
+ */
+ size = min(size, 4096UL);
+
+ return size;
+}
+
+/*
+ * This is an integer logarithm so that shifts can be used later
+ * to extract the more random high bits from the multiplicative
+ * hash function before the remainder is taken.
+ */
+static inline unsigned long wait_table_bits(unsigned long size)
+{
+ return ffz(~size);
+}
+
+
+#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
+
+/****************************************************************************/
+/*
+ * Set up the zone data structures:
+ * - mark all pages reserved
+ * - mark all memory queues empty
+ * - clear the memory bitmaps
+ *
+ * static in this version because I haven't thought it out yet ;-)
+ */
+
+void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
+ unsigned long *zones_size, unsigned long zone_start_paddr,
+ unsigned long *zholes_size, struct page *lmem_map)
+{
+ unsigned long i, j;
+ unsigned long map_size;
+ unsigned long totalpages, offset, realtotalpages;
+ const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
+
+ if (zone_start_paddr & ~PAGE_MASK)
+ BUG();
+
+ totalpages = 0;
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ unsigned long size = zones_size[i];
+ totalpages += size;
+ }
+ realtotalpages = totalpages;
+ if (zholes_size)
+ for (i = 0; i < MAX_NR_ZONES; i++)
+ realtotalpages -= zholes_size[i];
+
+ printk("On node %d totalpages: %lu\n", nid, realtotalpages);
+
+ INIT_LIST_HEAD(&active_list);
+ INIT_LIST_HEAD(&inactive_list);
+
+ /*
+	 * Some architectures (with lots of mem and discontiguous memory
+ * maps) have to search for a good mem_map area:
+ * For discontigmem, the conceptual mem map array starts from
+ * PAGE_OFFSET, we need to align the actual array onto a mem map
+ * boundary, so that MAP_NR works.
+ */
+ map_size = (totalpages + 1)*sizeof(struct page);
+ if (lmem_map == (struct page *)0) {
+ lmem_map = (struct page *) alloc_bootmem_node(pgdat, map_size);
+ lmem_map = (struct page *)(PAGE_OFFSET +
+ MAP_ALIGN((unsigned long)lmem_map - PAGE_OFFSET));
+ }
+ *gmap = pgdat->node_mem_map = lmem_map;
+ pgdat->node_size = totalpages;
+ pgdat->node_start_paddr = zone_start_paddr;
+ pgdat->node_start_mapnr = (lmem_map - mem_map);
+ pgdat->nr_zones = 0;
+
+ /*
+ * as we free pages we mark the first page that is usable
+ */
+ bit_map_size = totalpages;
+ bit_map = (unsigned char *)
+ alloc_bootmem_node(pgdat, LONG_ALIGN(bit_map_size / 8));
+ memset(bit_map, 0, LONG_ALIGN(bit_map_size / 8));
+
+ /*
+ * Initially all pages are reserved - free ones are freed
+ * up by free_all_bootmem() once the early boot process is
+ * done.
+ */
+
+ first_usable_page = totalpages;
+
+ offset = lmem_map - mem_map;
+ for (j = 0; j < MAX_NR_ZONES; j++) {
+ zone_t *zone = pgdat->node_zones + j;
+ unsigned long size, realsize;
+
+ zone_table[nid * MAX_NR_ZONES + j] = zone;
+ realsize = size = zones_size[j];
+ if (zholes_size)
+ realsize -= zholes_size[j];
+
+ printk("zone(%lu): %lu pages.\n", j, size);
+ zone->size = size;
+ zone->name = zone_names[j];
+ zone->lock = SPIN_LOCK_UNLOCKED;
+ zone->zone_pgdat = pgdat;
+ zone->free_pages = 0;
+ zone->need_balance = 0;
+ if (!size)
+ continue;
+
+ /*
+ * The per-page waitqueue mechanism uses hashed waitqueues
+ * per zone.
+ */
+ zone->wait_table_size = wait_table_size(size);
+ zone->wait_table_shift =
+ BITS_PER_LONG - wait_table_bits(zone->wait_table_size);
+ zone->wait_table = (wait_queue_head_t *)
+ alloc_bootmem_node(pgdat, zone->wait_table_size
+ * sizeof(wait_queue_head_t));
+
+ for (i = 0; i < zone->wait_table_size; ++i)
+ init_waitqueue_head(zone->wait_table + i);
+
+ pgdat->nr_zones = j+1;
+
+ zone->pages_min = 0;
+ zone->pages_low = 0;
+		zone->pages_high = realsize; /* very aggressive, always free pages */
+
+ zone->zone_mem_map = mem_map + offset;
+ zone->zone_start_mapnr = offset;
+ zone->zone_start_paddr = zone_start_paddr;
+
+ if ((zone_start_paddr >> PAGE_SHIFT) & (zone_required_alignment-1))
+ printk("BUG: wrong zone alignment, it will crash\n");
+
+ for (i = 0; i < size; i++) {
+ struct page *page = mem_map + offset + i;
+ set_page_zone(page, nid * MAX_NR_ZONES + j);
+ set_page_count(page, 0);
+ SetPageReserved(page);
+ set_bit(page-mem_map, bit_map);
+ INIT_LIST_HEAD(&page->list);
+ if (j != ZONE_HIGHMEM)
+ set_page_address(page, __va(zone_start_paddr));
+ zone_start_paddr += PAGE_SIZE;
+ }
+
+ offset += size;
+ }
+ build_zonelists(pgdat);
+}
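The allocation bitmap set up above costs one bit per page, so its overhead is small; 4096 pages (16 MB with 4 kB pages) need only 512 bytes. A trivial stand-alone sketch of that calculation:

#include <stdio.h>

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

int main(void)
{
	unsigned long totalpages = 4096;	/* e.g. 16 MB of RAM with 4 kB pages */

	printf("%lu pages -> %lu bytes of bitmap\n",
	       totalpages, (unsigned long)LONG_ALIGN(totalpages / 8));
	return 0;
}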
+
+/****************************************************************************/
+
+void __init free_area_init(unsigned long *zones_size)
+{
+ free_area_init_core(0, &contig_page_data, &mem_map, zones_size, 0, 0, 0);
+}
+
+/****************************************************************************/
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_MEM_MAP)
+/****************************************************************************/
+/*
+ * A small tool to help debug/display memory allocation problems
+ * Creates /proc/mem_map, an ASCII representation of what each
+ * page in memory is being used for. It displays the address of the
+ * memory down the left column and 64 pages per line (i.e., 256K).
+ *
+ * If you want better reporting, define MEGA_HACK below and then
+ * find all the referenced FS routines in the kernel and remove static
+ * from their definition (see page_alloc2.hack for patch).
+ *
+ * Obviously this code needs proc_fs, but it is trivial to make it
+ * use printk and always include it.
+ *
+ * KEY:
+ *
+ * Normal letters
+ * --------------
+ * - free
+ * R reserved (usually the kernel/mem_map/bitmap)
+ * X owned by a device/fs (see MEGA_HACK code)
+ * S swap cache
+ * L locked
+ * A Active
+ * U LRU
+ * s owned by the slab allocator
+ * r referenced
+ * C non zero count
+ * ? who knows ?
+ *
+ * Contiguous Page Alloc
+ * ---------------------
+ * 1 a single page_alloc2 page
+ * [=*]	contiguous pages allocated by page_alloc2
+ *
+ * MEGA HACK values
+ * ---------------------
+ * * ram disk
+ * # romfs
+ * M minix
+ * % ext2
+ * B block dev (cache etc)
+ *
+ * TODO:
+ * print process name for contiguous blocks
+ */
+/****************************************************************************/
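Because the map is exported through procfs under the name registered below, inspecting it needs nothing more than reading the file. A minimal user-space sketch, assuming CONFIG_PROC_FS and CONFIG_MEM_MAP are enabled:

#include <stdio.h>

int main(void)
{
	char buf[4096];
	size_t n;
	FILE *f = fopen("/proc/mem_map", "r");

	if (!f) {
		perror("/proc/mem_map");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}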
+
+#if 0
+#define MEGA_HACK 1
+#endif
+
+/****************************************************************************/
+
+#define PRINTK(a...) (page ? (len += sprintf(page + len, a)) : printk(a))
+
+#define FIXUP(t) \
+ if (page) { \
+ if (len <= off) { \
+ off -= len; \
+ len = 0; \
+ } else { \
+ if (len-off > count - 80) \
+ goto t; \
+ } \
+ } else
+
+
+static int
+mem_map_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len = 0;
+ struct page *p, *ep;
+ int cols;
+	unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ FIXUP(got_data);
+
+ cols = 0;
+ for (p = mem_map, ep = p + bit_map_size; p < ep; p++) {
+#ifdef MEGA_HACK
+		extern int blkdev_readpage(struct file *file, struct page *page);
+# ifdef CONFIG_BLK_DEV_RAM
+		extern int ramdisk_readpage(struct file *file, struct page *page);
+# endif
+# ifdef CONFIG_ROMFS_FS
+		extern int romfs_readpage(struct file *file, struct page *page);
+# endif
+# ifdef CONFIG_EXT2_FS
+		extern int ext2_readpage(struct file *file, struct page *page);
+# endif
+# ifdef CONFIG_MINIX_FS
+		extern int minix_readpage(struct file *file, struct page *page);
+# endif
+#endif
+
+ if (cols == 0)
+ PRINTK("0x%08x: ",(unsigned)page_address(p));
+ if (test_bit(p-mem_map, bit_map)) {
+ if (PageReserved(p))
+ PRINTK("R");
+ else if (p->mapping && p->mapping->a_ops) {
+#ifdef MEGA_HACK
+ if (p->mapping->a_ops->readpage == blkdev_readpage)
+ PRINTK("B");
+ else
+# ifdef CONFIG_BLK_DEV_RAM
+ if (p->mapping->a_ops->readpage == ramdisk_readpage)
+ PRINTK("*");
+ else
+# endif
+# ifdef CONFIG_ROMFS_FS
+ if (p->mapping->a_ops->readpage == romfs_readpage)
+ PRINTK("#");
+ else
+# endif
+# ifdef CONFIG_MINIX_FS
+ if (p->mapping->a_ops->readpage == minix_readpage)
+ PRINTK("M");
+ else
+# endif
+# ifdef CONFIG_EXT2_FS
+ if (p->mapping->a_ops->readpage == ext2_readpage)
+ PRINTK("%");
+ else
+# endif
+#endif
+ PRINTK("X");
+ } else if (PageSwapCache(p))
+ PRINTK("S");
+ else if (PageLocked(p))
+ PRINTK("L");
+ else if (PageActive(p))
+ PRINTK("A");
+ else if (PageLRU(p))
+ PRINTK("U");
+ else if (PageSlab(p))
+ PRINTK("s");
+ else if (p->flags & (1<<PG_referenced))
+ PRINTK("r");
+ else if (atomic_read(&p->count)) {
+#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
+ if ((p->index & ~0xffff) == 0xa1c20000) {
+ if ((p->index & 0xffff) == 1)
+ PRINTK("1");
+ else {
+ int i = p->index & 0xffff;
+ PRINTK("["); p++; i--; cols++;
+ if (cols >= 64) {
+ PRINTK("\n");
+ cols = 0;
+ FIXUP(got_data);
+ }
+ while (i > 1) {
+ if (cols == 0)
+ PRINTK("0x%08x: ",(unsigned)page_address(p));
+ PRINTK("="); p++; i--; cols++;
+ if (cols >= 64) {
+ PRINTK("\n");
+ cols = 0;
+ FIXUP(got_data);
+ }
+ }
+ if (cols == 0)
+ PRINTK("0x%08x: ",(unsigned)page_address(p));
+ PRINTK("]");
+ }
+ } else
+#endif
+ PRINTK("C");
+ } else
+ PRINTK("?");
+ } else
+ PRINTK("-");
+ cols++;
+ if (cols >= 64) {
+ PRINTK("\n");
+ cols = 0;
+ FIXUP(got_data);
+ }
+ }
+ if (cols)
+ PRINTK("\n");
+ FIXUP(got_data);
+ PRINTK("\n");
+ FIXUP(got_data);
+
+{
+ unsigned long total_bytes = 0, total_sbytes = 0, total_slack = 0;
+ struct task_struct *p;
+
+ for_each_task(p) {
+ struct mm_struct *mm = p->mm;
+ unsigned long bytes = 0, sbytes = 0, slack = 0;
+ struct mm_tblock_struct * tblock;
+
+ if (!mm)
+ continue;
+
+ for (tblock = &mm->tblock; tblock; tblock = tblock->next) {
+ if (tblock->rblock) {
+ bytes += ksize(tblock);
+ if (atomic_read(&mm->mm_count) > 1 ||
+ tblock->rblock->refcount > 1) {
+ sbytes += ksize(tblock->rblock->kblock);
+ sbytes += ksize(tblock->rblock) ;
+ } else {
+ bytes += ksize(tblock->rblock->kblock);
+ bytes += ksize(tblock->rblock) ;
+ slack += ksize(tblock->rblock->kblock) - tblock->rblock->size;
+ }
+ }
+ }
+
+ ((atomic_read(&mm->mm_count) > 1) ? sbytes : bytes)
+ += ksize(mm);
+ (current->fs && atomic_read(&current->fs->count) > 1 ? sbytes : bytes)
+ += ksize(current->fs);
+ (current->files && atomic_read(&current->files->count) > 1 ? sbytes : bytes)
+ += ksize(current->files);
+ (current->sig && atomic_read(&current->sig->count) > 1 ? sbytes : bytes)
+ += ksize(current->sig);
+ bytes += ksize(current); /* includes kernel stack */
+
+ PRINTK("%-16s Mem:%8lu Slack:%8lu Shared:%8lu\n", p->comm, bytes,
+ slack, sbytes);
+ FIXUP(got_data);
+ total_slack += slack;
+ total_sbytes += sbytes;
+ total_bytes += bytes;
+ }
+ PRINTK("%-16s Mem:%8lu Slack:%8lu Shared:%8lu\n\n", "Total", total_bytes,
+ total_slack, total_sbytes);
+ FIXUP(got_data);
+}
+
+ len += print_free_areas(page + len, count - len);
+ FIXUP(got_data);
+
+got_data:
+ restore_flags(flags);
+
+ if (page) {
+ *start = page+off;
+
+ len -= (*start-page);
+ if (len <= count - 80)
+ *eof = 1;
+ if (len>count) len = count;
+ if (len<0) len = 0;
+ }
+ return(len);
+}
+
+#undef FIXUP
+#undef PRINTK
+/****************************************************************************/
+
+static __init int
+page_alloc2_init(void)
+{
+ create_proc_read_entry("mem_map", S_IWUSR | S_IRUGO, NULL,
+ mem_map_read_proc, NULL);
+ return(0);
+}
+
+/****************************************************************************/
+
+module_init(page_alloc2_init);
+
+/****************************************************************************/
+#endif /* CONFIG_PROC_FS && CONFIG_MEM_MAP */
+/****************************************************************************/
diff --git a/uClinux-2.4.20-uc1/mmnommu/page_alloc2.hack b/uClinux-2.4.20-uc1/mmnommu/page_alloc2.hack
new file mode 100644
index 0000000..6ca320d
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/page_alloc2.hack
@@ -0,0 +1,64 @@
+Index: fs/block_dev.c
+===================================================================
+RCS file: /cvs/sw/linux-2.4.x/fs/block_dev.c,v
+retrieving revision 1.1.1.4
+diff -u -r1.1.1.4 block_dev.c
+--- fs/block_dev.c 7 Jan 2002 23:15:54 -0000 1.1.1.4
++++ fs/block_dev.c 5 Feb 2002 01:55:11 -0000
+@@ -123,7 +123,7 @@
+ return block_write_full_page(page, blkdev_get_block);
+ }
+
+-static int blkdev_readpage(struct file * file, struct page * page)
++int blkdev_readpage(struct file * file, struct page * page)
+ {
+ return block_read_full_page(page, blkdev_get_block);
+ }
+Index: fs/ext2/inode.c
+===================================================================
+RCS file: /cvs/sw/linux-2.4.x/fs/ext2/inode.c,v
+retrieving revision 1.1.1.4
+diff -u -r1.1.1.4 inode.c
+--- fs/ext2/inode.c 7 Jan 2002 23:15:55 -0000 1.1.1.4
++++ fs/ext2/inode.c 5 Feb 2002 01:55:12 -0000
+@@ -580,7 +580,7 @@
+ {
+ return block_write_full_page(page,ext2_get_block);
+ }
+-static int ext2_readpage(struct file *file, struct page *page)
++int ext2_readpage(struct file *file, struct page *page)
+ {
+ return block_read_full_page(page,ext2_get_block);
+ }
+Index: fs/romfs/inode.c
+===================================================================
+RCS file: /cvs/sw/linux-2.4.x/fs/romfs/inode.c,v
+retrieving revision 1.3
+diff -u -r1.3 inode.c
+--- fs/romfs/inode.c 8 Jan 2002 00:51:17 -0000 1.3
++++ fs/romfs/inode.c 5 Feb 2002 01:55:13 -0000
+@@ -390,7 +390,7 @@
+ * we can't use bmap, since we may have looser alignments.
+ */
+
+-static int
++int
+ romfs_readpage(struct file *file, struct page * page)
+ {
+ struct inode *inode = page->mapping->host;
+Index: drivers/block/rd.c
+===================================================================
+RCS file: /cvs/sw/linux-2.4.x/drivers/block/rd.c,v
+retrieving revision 1.1.1.3
+diff -u -r1.1.1.3 rd.c
+--- drivers/block/rd.c 7 Jan 2002 23:15:32 -0000 1.1.1.3
++++ drivers/block/rd.c 5 Feb 2002 01:55:14 -0000
+@@ -191,7 +191,7 @@
+ * 2000 Transmeta Corp.
+ * aops copied from ramfs.
+ */
+-static int ramdisk_readpage(struct file *file, struct page * page)
++int ramdisk_readpage(struct file *file, struct page * page)
+ {
+ if (!Page_Uptodate(page)) {
+ memset(kmap(page), 0, PAGE_CACHE_SIZE);
diff --git a/uClinux-2.4.20-uc1/mmnommu/slab.c b/uClinux-2.4.20-uc1/mmnommu/slab.c
new file mode 100644
index 0000000..993f8cb
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/slab.c
@@ -0,0 +1,2180 @@
+/*
+ * linux/mm/slab.c
+ * Written by Mark Hemment, 1996/97.
+ * (markhe@nextd.demon.co.uk)
+ *
+ * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
+ *
+ * Major cleanup, different bufctl logic, per-cpu arrays
+ * (c) 2000 Manfred Spraul
+ *
+ * An implementation of the Slab Allocator as described in outline in;
+ * UNIX Internals: The New Frontiers by Uresh Vahalia
+ * Pub: Prentice Hall ISBN 0-13-101908-2
+ * or with a little more detail in;
+ * The Slab Allocator: An Object-Caching Kernel Memory Allocator
+ * Jeff Bonwick (Sun Microsystems).
+ * Presented at: USENIX Summer 1994 Technical Conference
+ *
+ *
+ * The memory is organized in caches, one cache for each object type.
+ * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
+ * Each cache consists of many slabs (they are small (usually one
+ * page long) and always contiguous), and each slab contains multiple
+ * initialized objects.
+ *
+ * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
+ * normal). If you need a special memory type, then you must create a new
+ * cache for that memory type.
+ *
+ * In order to reduce fragmentation, the slabs are sorted in 3 groups:
+ * full slabs with 0 free objects
+ * partial slabs
+ * empty slabs with no allocated objects
+ *
+ * If partial slabs exist, then new allocations come from these slabs,
+ * otherwise from empty slabs or new slabs are allocated.
+ *
+ * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
+ * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
+ *
+ * On SMP systems, each cache has a short per-cpu head array, most allocs
+ * and frees go into that array, and if that array overflows, then 1/2
+ * of the entries in the array are given back into the global cache.
+ * This reduces the number of spinlock operations.
+ *
+ * The c_cpuarray may not be read with enabled local interrupts.
+ *
+ * SMP synchronization:
+ * constructors and destructors are called without any locking.
+ * Several members in kmem_cache_t and slab_t never change, they
+ * are accessed without any locking.
+ * The per-cpu arrays are never accessed from the wrong cpu, no locking.
+ * The non-constant members are protected with a per-cache irq spinlock.
+ *
+ * Further notes from the original documentation:
+ *
+ * 11 April '97. Started multi-threading - markhe
+ * The global cache-chain is protected by the semaphore 'cache_chain_sem'.
+ * The sem is only needed when accessing/extending the cache-chain, which
+ * can never happen inside an interrupt (kmem_cache_create(),
+ * kmem_cache_shrink() and kmem_cache_reap()).
+ *
+ * To prevent kmem_cache_shrink() trying to shrink a 'growing' cache (which
+ * may be sleeping and therefore not holding the semaphore/lock), the
+ * growing field is used. This also prevents reaping from a cache.
+ *
+ * At present, each engine can be growing a cache. This should be blocked.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/seq_file.h>
+#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
+#include <linux/pagemap.h>
+#endif
+#include <asm/uaccess.h>
+
+
+extern unsigned long __get_contiguous_pages(unsigned int gfp_mask,
+ unsigned long num, unsigned int align_order);
+extern void free_contiguous_pages(unsigned long addr, unsigned int num);
+
+
+/*
+ * DEBUG - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
+ * SLAB_RED_ZONE & SLAB_POISON.
+ * 0 for faster, smaller code (especially in the critical paths).
+ *
+ * STATS - 1 to collect stats for /proc/slabinfo.
+ * 0 for faster, smaller code (especially in the critical paths).
+ *
+ * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
+ */
+
+#ifdef CONFIG_DEBUG_SLAB
+#define DEBUG 1
+#define STATS 1
+#define FORCED_DEBUG 1
+#else
+#define DEBUG 0
+#define STATS 0
+#define FORCED_DEBUG 0
+#endif
+
+/*
+ * Parameters for kmem_cache_reap
+ */
+#define REAP_SCANLEN 10
+#define REAP_PERFECT 10
+
+/* Shouldn't this be in a header file somewhere? */
+#if (defined(CONFIG_UCSIMM) || defined(CONFIG_UCDIMM) || defined(CONFIG_DRAGEN2)) && !DEBUG
+/* a size other than 32 bits breaks the debug code */
+#define BYTES_PER_WORD sizeof(short)
+#else
+#define BYTES_PER_WORD sizeof(void *)
+#endif
+
+/* Legal flag mask for kmem_cache_create(). */
+#if DEBUG
+# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
+ SLAB_POISON | SLAB_HWCACHE_ALIGN | \
+ SLAB_NO_REAP | SLAB_CACHE_DMA | \
+ SLAB_MUST_HWCACHE_ALIGN)
+#else
+# define CREATE_MASK (SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
+ SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN)
+#endif
+
+/*
+ * kmem_bufctl_t:
+ *
+ * Bufctl's are used for linking objs within a slab
+ * linked offsets.
+ *
+ * This implementation relies on "struct page" for locating the cache &
+ * slab an object belongs to.
+ * This allows the bufctl structure to be small (one int), but limits
+ * the number of objects a slab (not a cache) can contain when off-slab
+ * bufctls are used. The limit is the size of the largest general cache
+ * that does not use off-slab slabs.
+ * For 32bit archs with 4 kB pages, this is 56.
+ * This is not serious, as it is only for large objects, when it is unwise
+ * to have too many per slab.
+ * Note: This limit can be raised by introducing a general cache whose size
+ * is less than 512 (PAGE_SIZE<<3), but greater than 256.
+ */
+
+#define BUFCTL_END 0xffffFFFF
+#define SLAB_LIMIT 0xffffFFFE
+typedef unsigned int kmem_bufctl_t;
+
+/* Max number of objs-per-slab for caches which use off-slab slabs.
+ * Needed to avoid a possible looping condition in kmem_cache_grow().
+ */
+static unsigned long offslab_limit;
+
+/*
+ * slab_t
+ *
+ * Manages the objs in a slab. Placed either at the beginning of mem allocated
+ * for a slab, or allocated from a general cache.
+ * Slabs are chained into three lists: fully used, partial, fully free slabs.
+ */
+typedef struct slab_s {
+ struct list_head list;
+ unsigned long colouroff;
+ void *s_mem; /* including colour offset */
+ unsigned int inuse; /* num of objs active in slab */
+ kmem_bufctl_t free;
+} slab_t;
+
+#define slab_bufctl(slabp) \
+ ((kmem_bufctl_t *)(((slab_t*)slabp)+1))
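The array exposed by slab_bufctl() is an index-based free list: slabp->free holds the index of the first free object and each array slot holds the index of the next one, with BUFCTL_END terminating the chain. A hedged sketch of how an allocation pops that list (not the kernel's actual fast path, which also handles colouring, red zoning and statistics):

static void *sketch_alloc_one(slab_t *slabp, size_t objsize)
{
	void *objp;

	if (slabp->free == BUFCTL_END)
		return NULL;			/* no free object left in this slab */
	objp = slabp->s_mem + slabp->free * objsize;
	/* advance the free-list head to the index stored in the bufctl array */
	slabp->free = slab_bufctl(slabp)[slabp->free];
	slabp->inuse++;
	return objp;
}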
+
+/*
+ * cpucache_t
+ *
+ * Per cpu structures
+ * The limit is stored in the per-cpu structure to reduce the data cache
+ * footprint.
+ */
+typedef struct cpucache_s {
+ unsigned int avail;
+ unsigned int limit;
+} cpucache_t;
+
+#define cc_entry(cpucache) \
+ ((void **)(((cpucache_t*)(cpucache))+1))
+#define cc_data(cachep) \
+ ((cachep)->cpudata[smp_processor_id()])
+/*
+ * kmem_cache_t
+ *
+ * manages a cache.
+ */
+
+#define CACHE_NAMELEN 20 /* max name length for a slab cache */
+
+struct kmem_cache_s {
+/* 1) each alloc & free */
+ /* full, partial first, then free */
+ struct list_head slabs_full;
+ struct list_head slabs_partial;
+ struct list_head slabs_free;
+ unsigned int objsize;
+ unsigned int flags; /* constant flags */
+ unsigned int num; /* # of objs per slab */
+ spinlock_t spinlock;
+#ifdef CONFIG_SMP
+ unsigned int batchcount;
+#endif
+
+/* 2) slab additions /removals */
+ /* order of pgs per slab (2^n) */
+ unsigned int gfporder;
+
+ /* force GFP flags, e.g. GFP_DMA */
+ unsigned int gfpflags;
+
+ size_t colour; /* cache colouring range */
+ unsigned int colour_off; /* colour offset */
+ unsigned int colour_next; /* cache colouring */
+ kmem_cache_t *slabp_cache;
+ unsigned int growing;
+ unsigned int dflags; /* dynamic flags */
+
+ /* constructor func */
+ void (*ctor)(void *, kmem_cache_t *, unsigned long);
+
+ /* de-constructor func */
+ void (*dtor)(void *, kmem_cache_t *, unsigned long);
+
+ unsigned long failures;
+
+/* 3) cache creation/removal */
+ char name[CACHE_NAMELEN];
+ struct list_head next;
+#ifdef CONFIG_SMP
+/* 4) per-cpu data */
+ cpucache_t *cpudata[NR_CPUS];
+#endif
+#if STATS
+ unsigned long num_active;
+ unsigned long num_allocations;
+ unsigned long high_mark;
+ unsigned long grown;
+ unsigned long reaped;
+ unsigned long errors;
+#ifdef CONFIG_SMP
+ atomic_t allochit;
+ atomic_t allocmiss;
+ atomic_t freehit;
+ atomic_t freemiss;
+#endif
+#endif
+};
+
+/* internal c_flags */
+#define CFLGS_OFF_SLAB 0x010000UL /* slab management in own cache */
+#define CFLGS_OPTIMIZE 0x020000UL /* optimized slab lookup */
+
+/* c_dflags (dynamic flags). Need to hold the spinlock to access this member */
+#define DFLGS_GROWN 0x000001UL /* don't reap a recently grown */
+
+#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
+#define OPTIMIZE(x) ((x)->flags & CFLGS_OPTIMIZE)
+#define	GROWN(x)	((x)->dflags & DFLGS_GROWN)
+
+#if STATS
+#define STATS_INC_ACTIVE(x) ((x)->num_active++)
+#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
+#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
+#define STATS_INC_GROWN(x) ((x)->grown++)
+#define STATS_INC_REAPED(x) ((x)->reaped++)
+#define STATS_SET_HIGH(x) do { if ((x)->num_active > (x)->high_mark) \
+ (x)->high_mark = (x)->num_active; \
+ } while (0)
+#define STATS_INC_ERR(x) ((x)->errors++)
+#else
+#define STATS_INC_ACTIVE(x) do { } while (0)
+#define STATS_DEC_ACTIVE(x) do { } while (0)
+#define STATS_INC_ALLOCED(x) do { } while (0)
+#define STATS_INC_GROWN(x) do { } while (0)
+#define STATS_INC_REAPED(x) do { } while (0)
+#define STATS_SET_HIGH(x) do { } while (0)
+#define STATS_INC_ERR(x) do { } while (0)
+#endif
+
+#if STATS && defined(CONFIG_SMP)
+#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
+#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
+#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
+#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
+#else
+#define STATS_INC_ALLOCHIT(x) do { } while (0)
+#define STATS_INC_ALLOCMISS(x) do { } while (0)
+#define STATS_INC_FREEHIT(x) do { } while (0)
+#define STATS_INC_FREEMISS(x) do { } while (0)
+#endif
+
+#if DEBUG
+/* Magic nums for obj red zoning.
+ * Placed in the first word before and the first word after an obj.
+ */
+#define RED_MAGIC1 0x5A2CF071UL /* when obj is active */
+#define RED_MAGIC2 0x170FC2A5UL /* when obj is inactive */
+
+/* ...and for poisoning */
+#define POISON_BYTE 0x5a /* byte value for poisoning */
+#define POISON_END 0xa5 /* end-byte of poisoning */
+
+#endif
+
+/* maximum size of an obj (in 2^order pages) */
+#ifndef NO_MM
+#define MAX_OBJ_ORDER 5 /* 32 pages */
+#elif defined (CONFIG_NO_MMU_LARGE_ALLOCS)
+#define MAX_OBJ_ORDER 13 /* up to 32Mb */
+#else
+#define MAX_OBJ_ORDER 8 /* up to 1MB */
+#endif
+
+/*
+ * Do not go above this order unless 0 objects fit into the slab.
+ */
+#define BREAK_GFP_ORDER_HI 2
+#define BREAK_GFP_ORDER_LO 1
+static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
+
+/*
+ * Absolute limit for the gfp order
+ */
+#ifndef NO_MM
+#define MAX_GFP_ORDER 5 /* 32 pages */
+#elif defined (CONFIG_NO_MMU_LARGE_ALLOCS)
+#define MAX_GFP_ORDER 13 /* up to 32MB */
+#else
+#define MAX_GFP_ORDER 8 /* up to 1MB */
+#endif
+
+
+/* Macros for storing/retrieving the cachep and or slab from the
+ * global 'mem_map'. These are used to find the slab an obj belongs to.
+ * With kfree(), these are used to find the cache which an obj belongs to.
+ */
+#define SET_PAGE_CACHE(pg,x) ((pg)->list.next = (struct list_head *)(x))
+#define GET_PAGE_CACHE(pg) ((kmem_cache_t *)(pg)->list.next)
+#define SET_PAGE_SLAB(pg,x) ((pg)->list.prev = (struct list_head *)(x))
+#define GET_PAGE_SLAB(pg) ((slab_t *)(pg)->list.prev)
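These macros are what let kfree() and kmem_cache_free() recover the owning cache and slab from nothing but an object pointer: the page backing the object carries both pointers in its otherwise unused list field. A minimal sketch of that lookup:

static kmem_cache_t *sketch_cache_of(const void *objp)
{
	struct page *page = virt_to_page(objp);

	/* the cache pointer was stashed in page->list.next by SET_PAGE_CACHE() */
	return GET_PAGE_CACHE(page);
}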
+
+/* Size description struct for general caches. */
+typedef struct cache_sizes {
+ size_t cs_size;
+ kmem_cache_t *cs_cachep;
+ kmem_cache_t *cs_dmacachep;
+} cache_sizes_t;
+
+static cache_sizes_t cache_sizes[] = {
+#if PAGE_SIZE == 4096
+ { 32, NULL, NULL},
+#endif
+ { 64, NULL, NULL},
+ { 128, NULL, NULL},
+ { 256, NULL, NULL},
+ { 512, NULL, NULL},
+ { 1024, NULL, NULL},
+ { 2048, NULL, NULL},
+ { 4096, NULL, NULL},
+#ifndef CONFIG_CONTIGUOUS_PAGE_ALLOC /* assumes page size of 4096 */
+ { 8192, NULL, NULL},
+ { 16384, NULL, NULL},
+ { 32768, NULL, NULL},
+ { 65536, NULL, NULL},
+ {131072, NULL, NULL},
+#ifdef NO_MM
+ {262144, NULL, NULL},
+ {524288, NULL, NULL},
+ {1048576, NULL, NULL},
+#ifdef CONFIG_NO_MMU_LARGE_ALLOCS
+ {2097152, NULL, NULL},
+ {4194304, NULL, NULL},
+ {8388608, NULL, NULL},
+ {16777216, NULL, NULL},
+ {33554432, NULL, NULL},
+#endif /* CONFIG_NO_MMU_LARGE_ALLOCS */
+#endif /* NO_MM */
+#endif /* CONFIG_CONTIGUOUS_PAGE_ALLOC */
+ { 0, NULL, NULL}
+};
+
+/* internal cache of cache description objs */
+static kmem_cache_t cache_cache = {
+ slabs_full: LIST_HEAD_INIT(cache_cache.slabs_full),
+ slabs_partial: LIST_HEAD_INIT(cache_cache.slabs_partial),
+ slabs_free: LIST_HEAD_INIT(cache_cache.slabs_free),
+ objsize: sizeof(kmem_cache_t),
+ flags: SLAB_NO_REAP,
+ spinlock: SPIN_LOCK_UNLOCKED,
+ colour_off: L1_CACHE_BYTES,
+ name: "kmem_cache",
+};
+
+/* Guard access to the cache-chain. */
+static struct semaphore cache_chain_sem;
+
+/* Place maintainer for reaping. */
+static kmem_cache_t *clock_searchp = &cache_cache;
+
+#define cache_chain (cache_cache.next)
+
+#ifdef CONFIG_SMP
+/*
+ * chicken and egg problem: delay the per-cpu array allocation
+ * until the general caches are up.
+ */
+static int g_cpucache_up;
+
+static void enable_cpucache (kmem_cache_t *cachep);
+static void enable_all_cpucaches (void);
+#endif
+
+/* Calculate the number of objs, wastage, and bytes left over for a given slab size. */
+static void kmem_cache_estimate (unsigned long gfporder, size_t size,
+ int flags, size_t *left_over, unsigned int *num)
+{
+ unsigned int i;
+ size_t wastage = PAGE_SIZE<<gfporder;
+ size_t extra = 0;
+ size_t base = 0;
+
+ if (!(flags & CFLGS_OFF_SLAB)) {
+ base = sizeof(slab_t);
+ extra = sizeof(kmem_bufctl_t);
+ }
+ i = 0;
+ while (i*size + L1_CACHE_ALIGN(base+i*extra) <= wastage)
+ i++;
+ if (i > 0)
+ i--;
+
+ if (i > SLAB_LIMIT)
+ i = SLAB_LIMIT;
+
+ *num = i;
+ wastage -= i*size;
+ wastage -= L1_CACHE_ALIGN(base+i*extra);
+ *left_over = wastage;
+}
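A worked example of the estimate above, assuming 256-byte objects in an order-0 (4096-byte) slab with on-slab management, a sizeof(slab_t) of 32, a sizeof(kmem_bufctl_t) of 4 and 32-byte cache lines (all assumptions for the example): the loop settles on 15 objects per slab with 160 bytes left over for colouring. The same arithmetic as a stand-alone program:

#include <stdio.h>

#define SK_L1_CACHE_ALIGN(x) (((x) + 31UL) & ~31UL)	/* assume 32-byte lines */

int main(void)
{
	unsigned long wastage = 4096;	/* order-0 slab */
	unsigned long size = 256;	/* object size */
	unsigned long base = 32;	/* assumed sizeof(slab_t) */
	unsigned long extra = 4;	/* assumed sizeof(kmem_bufctl_t) */
	unsigned long i = 0;

	while (i * size + SK_L1_CACHE_ALIGN(base + i * extra) <= wastage)
		i++;
	if (i > 0)
		i--;
	printf("%lu objects per slab, %lu bytes left over for colouring\n",
	       i, wastage - i * size - SK_L1_CACHE_ALIGN(base + i * extra));
	return 0;
}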
+
+/* Initialisation - setup the `cache' cache. */
+void __init kmem_cache_init(void)
+{
+ size_t left_over;
+
+ init_MUTEX(&cache_chain_sem);
+ INIT_LIST_HEAD(&cache_chain);
+
+ kmem_cache_estimate(0, cache_cache.objsize, 0,
+ &left_over, &cache_cache.num);
+ if (!cache_cache.num)
+ BUG();
+
+ cache_cache.colour = left_over/cache_cache.colour_off;
+ cache_cache.colour_next = 0;
+}
+
+
+/* Initialisation - setup remaining internal and general caches.
+ * Called after the gfp() functions have been enabled, and before smp_init().
+ */
+void __init kmem_cache_sizes_init(void)
+{
+ cache_sizes_t *sizes = cache_sizes;
+ char name[20];
+ /*
+ * Fragmentation resistance on low memory - only use bigger
+ * page orders on machines with more than 32MB of memory.
+ */
+ if (num_physpages > (32 << 20) >> PAGE_SHIFT)
+ slab_break_gfp_order = BREAK_GFP_ORDER_HI;
+ do {
+ /* For performance, all the general caches are L1 aligned.
+ * This should be particularly beneficial on SMP boxes, as it
+ * eliminates "false sharing".
+ * Note for systems short on memory removing the alignment will
+ * allow tighter packing of the smaller caches. */
+ snprintf(name, sizeof(name), "size-%lu",(unsigned long)sizes->cs_size);
+ if (!(sizes->cs_cachep =
+ kmem_cache_create(name, sizes->cs_size,
+ 0, SLAB_HWCACHE_ALIGN, NULL, NULL))) {
+ BUG();
+ }
+
+ /* Inc off-slab bufctl limit until the ceiling is hit. */
+ if (!(OFF_SLAB(sizes->cs_cachep))) {
+ offslab_limit = sizes->cs_size-sizeof(slab_t);
+ offslab_limit /= 2;
+ }
+ snprintf(name, sizeof(name), "size-%lu(DMA)",(unsigned long)sizes->cs_size);
+ sizes->cs_dmacachep = kmem_cache_create(name, sizes->cs_size, 0,
+ SLAB_CACHE_DMA|SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if (!sizes->cs_dmacachep)
+ BUG();
+ sizes++;
+ } while (sizes->cs_size);
+}
+
+int __init kmem_cpucache_init(void)
+{
+#ifdef CONFIG_SMP
+ g_cpucache_up = 1;
+ enable_all_cpucaches();
+#endif
+ return 0;
+}
+
+__initcall(kmem_cpucache_init);
+
+/* Interface to system's page allocator. No need to hold the cache-lock.
+ */
+static inline void * kmem_getpages (kmem_cache_t *cachep, unsigned long flags)
+{
+ void *addr;
+
+ /*
+ * If we requested dmaable memory, we will get it. Even if we
+ * did not request dmaable memory, we might get it, but that
+ * would be relatively rare and ignorable.
+ */
+ flags |= cachep->gfpflags;
+ addr = (void*) __get_free_pages(flags, cachep->gfporder);
+ /* Assume that now we have the pages no one else can legally
+	 * mess with the 'struct page's.
+ * However vm_scan() might try to test the structure to see if
+ * it is a named-page or buffer-page. The members it tests are
+ * of no interest here.....
+ */
+ return addr;
+}
+
+/* Interface to system's page release. */
+static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
+{
+ unsigned long i = (1<<cachep->gfporder);
+ struct page *page = virt_to_page(addr);
+
+ /* free_pages() does not clear the type bit - we do that.
+ * The pages have been unlinked from their cache-slab,
+ * but their 'struct page's might be accessed in
+ * vm_scan(). Shouldn't be a worry.
+ */
+ while (i--) {
+ PageClearSlab(page);
+ page++;
+ }
+ free_pages((unsigned long)addr, cachep->gfporder);
+}
+
+#if DEBUG
+static inline void kmem_poison_obj (kmem_cache_t *cachep, void *addr)
+{
+ int size = cachep->objsize;
+ if (cachep->flags & SLAB_RED_ZONE) {
+ addr += BYTES_PER_WORD;
+ size -= 2*BYTES_PER_WORD;
+ }
+ memset(addr, POISON_BYTE, size);
+ *(unsigned char *)(addr+size-1) = POISON_END;
+}
+
+static inline int kmem_check_poison_obj (kmem_cache_t *cachep, void *addr)
+{
+ int size = cachep->objsize;
+ void *end;
+ if (cachep->flags & SLAB_RED_ZONE) {
+ addr += BYTES_PER_WORD;
+ size -= 2*BYTES_PER_WORD;
+ }
+ end = memchr(addr, POISON_END, size);
+ if (end != (addr+size-1))
+ return 1;
+ return 0;
+}
+#endif
+
+/* Destroy all the objs in a slab, and release the mem back to the system.
+ * Before calling the slab must have been unlinked from the cache.
+ * The cache-lock is not held/needed.
+ */
+static void kmem_slab_destroy (kmem_cache_t *cachep, slab_t *slabp)
+{
+ if (cachep->dtor
+#if DEBUG
+ || cachep->flags & (SLAB_POISON | SLAB_RED_ZONE)
+#endif
+ ) {
+ int i;
+ for (i = 0; i < cachep->num; i++) {
+ void* objp = slabp->s_mem+cachep->objsize*i;
+#if DEBUG
+ if (cachep->flags & SLAB_RED_ZONE) {
+ if (*((unsigned long*)(objp)) != RED_MAGIC1)
+ BUG();
+ if (*((unsigned long*)(objp + cachep->objsize
+ -BYTES_PER_WORD)) != RED_MAGIC1)
+ BUG();
+ objp += BYTES_PER_WORD;
+ }
+#endif
+ if (cachep->dtor)
+ (cachep->dtor)(objp, cachep, 0);
+#if DEBUG
+ if (cachep->flags & SLAB_RED_ZONE) {
+ objp -= BYTES_PER_WORD;
+ }
+ if ((cachep->flags & SLAB_POISON) &&
+ kmem_check_poison_obj(cachep, objp))
+ BUG();
+#endif
+ }
+ }
+
+ kmem_freepages(cachep, slabp->s_mem-slabp->colouroff);
+ if (OFF_SLAB(cachep))
+ kmem_cache_free(cachep->slabp_cache, slabp);
+}
+
+/**
+ * kmem_cache_create - Create a cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @offset: The offset to use within the page.
+ * @flags: SLAB flags
+ * @ctor: A constructor for the objects.
+ * @dtor: A destructor for the objects.
+ *
+ * Returns a ptr to the cache on success, NULL on failure.
+ * Cannot be called within a int, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache
+ * and the @dtor is run before the pages are handed back.
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
+ * memory pressure.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
+ */
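As a quick illustration of the interface documented above (the function itself follows), a caller typically creates the cache once and then allocates and frees objects from it; struct foo, foo_cache and the helper names are hypothetical, invented for this sketch:

#include <linux/slab.h>
#include <linux/errno.h>

struct foo {
	int a, b;
};

static kmem_cache_t *foo_cache;

static int foo_setup(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static struct foo *foo_get(void)
{
	return kmem_cache_alloc(foo_cache, GFP_KERNEL);
}

static void foo_put(struct foo *f)
{
	kmem_cache_free(foo_cache, f);
}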
+kmem_cache_t *
+kmem_cache_create (const char *name, size_t size, size_t offset,
+ unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
+ void (*dtor)(void*, kmem_cache_t *, unsigned long))
+{
+ const char *func_nm = "kmem_create: ";
+ size_t left_over, align, slab_size;
+ kmem_cache_t *cachep = NULL;
+
+ /*
+ * Sanity checks... these are all serious usage bugs.
+ */
+ if ((!name) ||
+ ((strlen(name) >= CACHE_NAMELEN - 1)) ||
+ in_interrupt() ||
+ (size < BYTES_PER_WORD) ||
+ (size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
+ (dtor && !ctor) ||
+ (offset < 0 || offset > size))
+ BUG();
+
+#if DEBUG
+ if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
+		/* No constructor, but initial state check requested */
+ printk(KERN_ERR "%sNo con, but init state check requested - %s\n", func_nm, name);
+ flags &= ~SLAB_DEBUG_INITIAL;
+ }
+
+ if ((flags & SLAB_POISON) && ctor) {
+ /* request for poisoning, but we can't do that with a constructor */
+ printk(KERN_ERR "%sPoisoning requested, but con given - %s\n", func_nm, name);
+ flags &= ~SLAB_POISON;
+ }
+#if FORCED_DEBUG
+ if ((size < (PAGE_SIZE>>3)) && !(flags & SLAB_MUST_HWCACHE_ALIGN))
+ /*
+		 * do not red zone large objects; that causes severe
+		 * fragmentation.
+ */
+ flags |= SLAB_RED_ZONE;
+ if (!ctor)
+ flags |= SLAB_POISON;
+#endif
+#endif
+
+ /*
+ * Always checks flags, a caller might be expecting debug
+ * support which isn't available.
+ */
+ BUG_ON(flags & ~CREATE_MASK);
+
+ /* Get cache's description obj. */
+ cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
+ if (!cachep)
+ goto opps;
+ memset(cachep, 0, sizeof(kmem_cache_t));
+
+ /* Check that size is in terms of words. This is needed to avoid
+ * unaligned accesses for some archs when redzoning is used, and makes
+ * sure any on-slab bufctl's are also correctly aligned.
+ */
+ if (size & (BYTES_PER_WORD-1)) {
+ size += (BYTES_PER_WORD-1);
+ size &= ~(BYTES_PER_WORD-1);
+ printk(KERN_WARNING "%sForcing size word alignment - %s\n", func_nm, name);
+ }
+
+#if DEBUG
+ if (flags & SLAB_RED_ZONE) {
+ /*
+ * There is no point trying to honour cache alignment
+ * when redzoning.
+ */
+ flags &= ~SLAB_HWCACHE_ALIGN;
+ size += 2*BYTES_PER_WORD; /* words for redzone */
+ }
+#endif
+ align = BYTES_PER_WORD;
+ if (flags & SLAB_HWCACHE_ALIGN)
+ align = L1_CACHE_BYTES;
+
+ /* Determine if the slab management is 'on' or 'off' slab. */
+ if (size >= (PAGE_SIZE>>3))
+ /*
+ * Size is large, assume best to place the slab management obj
+ * off-slab (should allow better packing of objs).
+ */
+ flags |= CFLGS_OFF_SLAB;
+
+ if (flags & SLAB_HWCACHE_ALIGN) {
+ /* Need to adjust size so that objs are cache aligned. */
+ /* Small obj size, can get at least two per cache line. */
+ /* FIXME: only power of 2 supported, was better */
+ while (size < align/2)
+ align /= 2;
+ size = (size+align-1)&(~(align-1));
+ }
+
+ /* Calculate the size (in pages) of slabs, and the number of objs per slab.
+ * This could be made much more intelligent. For now, try to avoid
+ * using high page-orders for slabs. When the gfp() funcs are more
+ * friendly towards high-order requests, this should be changed.
+ */
+ do {
+ unsigned int break_flag = 0;
+cal_wastage:
+ kmem_cache_estimate(cachep->gfporder, size, flags,
+ &left_over, &cachep->num);
+ if (break_flag)
+ break;
+ if (cachep->gfporder >= MAX_GFP_ORDER)
+ break;
+ if (!cachep->num)
+ goto next;
+ if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit) {
+ /* Oops, this num of objs will cause problems. */
+ cachep->gfporder--;
+ break_flag++;
+ goto cal_wastage;
+ }
+
+ /*
+ * Large num of objs is good, but v. large slabs are currently
+ * bad for the gfp()s.
+ */
+ if (cachep->gfporder >= slab_break_gfp_order)
+ break;
+
+ if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
+ break; /* Acceptable internal fragmentation. */
+next:
+ cachep->gfporder++;
+ } while (1);
+
+ if (!cachep->num) {
+ printk("kmem_cache_create: couldn't create cache %s.\n", name);
+ kmem_cache_free(&cache_cache, cachep);
+ cachep = NULL;
+ goto opps;
+ }
+ slab_size = L1_CACHE_ALIGN(cachep->num*sizeof(kmem_bufctl_t)+sizeof(slab_t));
+
+ /*
+ * If the slab has been placed off-slab, and we have enough space then
+ * move it on-slab. This is at the expense of any extra colouring.
+ */
+ if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
+ flags &= ~CFLGS_OFF_SLAB;
+ left_over -= slab_size;
+ }
+
+ /* Offset must be a multiple of the alignment. */
+ offset += (align-1);
+ offset &= ~(align-1);
+ if (!offset)
+ offset = L1_CACHE_BYTES;
+ cachep->colour_off = offset;
+ cachep->colour = left_over/offset;
+
+ /* init remaining fields */
+ if (!cachep->gfporder && !(flags & CFLGS_OFF_SLAB))
+ flags |= CFLGS_OPTIMIZE;
+
+ cachep->flags = flags;
+ cachep->gfpflags = 0;
+ if (flags & SLAB_CACHE_DMA)
+ cachep->gfpflags |= GFP_DMA;
+ spin_lock_init(&cachep->spinlock);
+ cachep->objsize = size;
+ INIT_LIST_HEAD(&cachep->slabs_full);
+ INIT_LIST_HEAD(&cachep->slabs_partial);
+ INIT_LIST_HEAD(&cachep->slabs_free);
+
+ if (flags & CFLGS_OFF_SLAB)
+ cachep->slabp_cache = kmem_find_general_cachep(slab_size,0);
+ cachep->ctor = ctor;
+ cachep->dtor = dtor;
+ /* Copy name over so we don't have problems with unloaded modules */
+ strcpy(cachep->name, name);
+
+#ifdef CONFIG_SMP
+ if (g_cpucache_up)
+ enable_cpucache(cachep);
+#endif
+ /* Need the semaphore to access the chain. */
+ down(&cache_chain_sem);
+ {
+ struct list_head *p;
+
+ list_for_each(p, &cache_chain) {
+ kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+
+ /* The name field is constant - no lock needed. */
+ if (!strcmp(pc->name, name))
+ BUG();
+ }
+ }
+
+ /* There is no reason to lock our new cache before we
+ * link it in - no one knows about it yet...
+ */
+ list_add(&cachep->next, &cache_chain);
+ up(&cache_chain_sem);
+opps:
+ return cachep;
+}
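+
+/*
+ * Editor's note: the sketch below is an illustration added for clarity and
+ * is not part of the original source.  It shows the typical lifecycle of a
+ * private cache using the interfaces documented above; "struct foo",
+ * "foo_cachep", foo_init() and foo_exit() are hypothetical names.
+ *
+ *	static kmem_cache_t *foo_cachep;
+ *
+ *	static int __init foo_init(void)
+ *	{
+ *		struct foo *f;
+ *
+ *		foo_cachep = kmem_cache_create("foo_cache",
+ *				sizeof(struct foo), 0,
+ *				SLAB_HWCACHE_ALIGN, NULL, NULL);
+ *		if (!foo_cachep)
+ *			return -ENOMEM;
+ *
+ *		f = kmem_cache_alloc(foo_cachep, SLAB_KERNEL);
+ *		if (f)
+ *			kmem_cache_free(foo_cachep, f);
+ *		return 0;
+ *	}
+ *
+ *	static void __exit foo_exit(void)
+ *	{
+ *		if (kmem_cache_destroy(foo_cachep))
+ *			printk(KERN_ERR "foo_cache: objects still in use\n");
+ *	}
+ */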
+
+
+#if DEBUG
+/*
+ * This checks if the kmem_cache_t pointer is chained in the cache_cache
+ * list. -arca
+ */
+static int is_chained_kmem_cache(kmem_cache_t * cachep)
+{
+ struct list_head *p;
+ int ret = 0;
+
+ /* Find the cache in the chain of caches. */
+ down(&cache_chain_sem);
+ list_for_each(p, &cache_chain) {
+ if (p == &cachep->next) {
+ ret = 1;
+ break;
+ }
+ }
+ up(&cache_chain_sem);
+
+ return ret;
+}
+#else
+#define is_chained_kmem_cache(x) 1
+#endif
+
+#ifdef CONFIG_SMP
+/*
+ * Waits for all CPUs to execute func().
+ */
+static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
+{
+ local_irq_disable();
+ func(arg);
+ local_irq_enable();
+
+ if (smp_call_function(func, arg, 1, 1))
+ BUG();
+}
+typedef struct ccupdate_struct_s
+{
+ kmem_cache_t *cachep;
+ cpucache_t *new[NR_CPUS];
+} ccupdate_struct_t;
+
+static void do_ccupdate_local(void *info)
+{
+ ccupdate_struct_t *new = (ccupdate_struct_t *)info;
+ cpucache_t *old = cc_data(new->cachep);
+
+ cc_data(new->cachep) = new->new[smp_processor_id()];
+ new->new[smp_processor_id()] = old;
+}
+
+static void free_block (kmem_cache_t* cachep, void** objpp, int len);
+
+static void drain_cpu_caches(kmem_cache_t *cachep)
+{
+ ccupdate_struct_t new;
+ int i;
+
+ memset(&new.new,0,sizeof(new.new));
+
+ new.cachep = cachep;
+
+ down(&cache_chain_sem);
+ smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
+
+ for (i = 0; i < smp_num_cpus; i++) {
+ cpucache_t* ccold = new.new[cpu_logical_map(i)];
+ if (!ccold || (ccold->avail == 0))
+ continue;
+ local_irq_disable();
+ free_block(cachep, cc_entry(ccold), ccold->avail);
+ local_irq_enable();
+ ccold->avail = 0;
+ }
+ smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
+ up(&cache_chain_sem);
+}
+
+#else
+#define drain_cpu_caches(cachep) do { } while (0)
+#endif
+
+/*
+ * Called with the &cachep->spinlock held, returns number of slabs released
+ */
+static int __kmem_cache_shrink_locked(kmem_cache_t *cachep)
+{
+ slab_t *slabp;
+ int ret = 0;
+
+ /* If the cache is growing, stop shrinking. */
+ while (!cachep->growing) {
+ struct list_head *p;
+
+ p = cachep->slabs_free.prev;
+ if (p == &cachep->slabs_free)
+ break;
+
+ slabp = list_entry(cachep->slabs_free.prev, slab_t, list);
+#if DEBUG
+ if (slabp->inuse)
+ BUG();
+#endif
+ list_del(&slabp->list);
+
+ spin_unlock_irq(&cachep->spinlock);
+ kmem_slab_destroy(cachep, slabp);
+ ret++;
+ spin_lock_irq(&cachep->spinlock);
+ }
+ return ret;
+}
+
+static int __kmem_cache_shrink(kmem_cache_t *cachep)
+{
+ int ret;
+
+ drain_cpu_caches(cachep);
+
+ spin_lock_irq(&cachep->spinlock);
+ __kmem_cache_shrink_locked(cachep);
+ ret = !list_empty(&cachep->slabs_full) ||
+ !list_empty(&cachep->slabs_partial);
+ spin_unlock_irq(&cachep->spinlock);
+ return ret;
+}
+
+/**
+ * kmem_cache_shrink - Shrink a cache.
+ * @cachep: The cache to shrink.
+ *
+ * Releases as many slabs as possible for a cache.
+ * Returns number of pages released.
+ */
+int kmem_cache_shrink(kmem_cache_t *cachep)
+{
+ int ret;
+
+ if (!cachep || in_interrupt() || !is_chained_kmem_cache(cachep))
+ BUG();
+
+ spin_lock_irq(&cachep->spinlock);
+ ret = __kmem_cache_shrink_locked(cachep);
+ spin_unlock_irq(&cachep->spinlock);
+
+ return ret << cachep->gfporder;
+}
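+
+/*
+ * Editor's note: illustration only, not part of the original source.
+ * A subsystem owning a private cache (the name "foo_cachep" is
+ * hypothetical) can hand its unused slabs back to the page allocator;
+ * the return value is a number of pages:
+ *
+ *	int freed = kmem_cache_shrink(foo_cachep);
+ *	printk(KERN_DEBUG "foo_cache: released %d pages\n", freed);
+ */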
+
+/**
+ * kmem_cache_destroy - delete a cache
+ * @cachep: the cache to destroy
+ *
+ * Remove a kmem_cache_t object from the slab cache.
+ * Returns 0 on success.
+ *
+ * It is expected this function will be called by a module when it is
+ * unloaded. This will remove the cache completely, and avoid a duplicate
+ * cache being allocated each time a module is loaded and unloaded, if the
+ * module doesn't have persistent in-kernel storage across loads and unloads.
+ *
+ * The caller must guarantee that no one will allocate memory from the cache
+ * during the kmem_cache_destroy().
+ */
+int kmem_cache_destroy (kmem_cache_t * cachep)
+{
+ if (!cachep || in_interrupt() || cachep->growing)
+ BUG();
+
+ /* Find the cache in the chain of caches. */
+ down(&cache_chain_sem);
+ /* the chain is never empty, cache_cache is never destroyed */
+ if (clock_searchp == cachep)
+ clock_searchp = list_entry(cachep->next.next,
+ kmem_cache_t, next);
+ list_del(&cachep->next);
+ up(&cache_chain_sem);
+
+ if (__kmem_cache_shrink(cachep)) {
+ printk(KERN_ERR "kmem_cache_destroy: Can't free all objects %p\n",
+ cachep);
+ down(&cache_chain_sem);
+ list_add(&cachep->next,&cache_chain);
+ up(&cache_chain_sem);
+ return 1;
+ }
+#ifdef CONFIG_SMP
+ {
+ int i;
+ for (i = 0; i < NR_CPUS; i++)
+ kfree(cachep->cpudata[i]);
+ }
+#endif
+ kmem_cache_free(&cache_cache, cachep);
+
+ return 0;
+}
+
+/* Get the memory for a slab management obj. */
+static inline slab_t * kmem_cache_slabmgmt (kmem_cache_t *cachep,
+ void *objp, int colour_off, int local_flags)
+{
+ slab_t *slabp;
+
+ if (OFF_SLAB(cachep)) {
+ /* Slab management obj is off-slab. */
+ slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
+ if (!slabp)
+ return NULL;
+ } else {
+ /* FIXME: change to
+ slabp = objp
+ * if you enable OPTIMIZE
+ */
+ slabp = objp+colour_off;
+ colour_off += L1_CACHE_ALIGN(cachep->num *
+ sizeof(kmem_bufctl_t) + sizeof(slab_t));
+ }
+ slabp->inuse = 0;
+ slabp->colouroff = colour_off;
+ slabp->s_mem = objp+colour_off;
+
+ return slabp;
+}
+
+static inline void kmem_cache_init_objs (kmem_cache_t * cachep,
+ slab_t * slabp, unsigned long ctor_flags)
+{
+ int i;
+
+ for (i = 0; i < cachep->num; i++) {
+ void* objp = slabp->s_mem+cachep->objsize*i;
+#if DEBUG
+ if (cachep->flags & SLAB_RED_ZONE) {
+ *((unsigned long*)(objp)) = RED_MAGIC1;
+ *((unsigned long*)(objp + cachep->objsize -
+ BYTES_PER_WORD)) = RED_MAGIC1;
+ objp += BYTES_PER_WORD;
+ }
+#endif
+
+ /*
+ * Constructors are not allowed to allocate memory from
+ * the same cache which they are a constructor for.
+ * Otherwise, deadlock. They must also be threaded.
+ */
+ if (cachep->ctor)
+ cachep->ctor(objp, cachep, ctor_flags);
+#if DEBUG
+ if (cachep->flags & SLAB_RED_ZONE)
+ objp -= BYTES_PER_WORD;
+ if (cachep->flags & SLAB_POISON)
+ /* need to poison the objs */
+ kmem_poison_obj(cachep, objp);
+ if (cachep->flags & SLAB_RED_ZONE) {
+ if (*((unsigned long*)(objp)) != RED_MAGIC1)
+ BUG();
+ if (*((unsigned long*)(objp + cachep->objsize -
+ BYTES_PER_WORD)) != RED_MAGIC1)
+ BUG();
+ }
+#endif
+ slab_bufctl(slabp)[i] = i+1;
+ }
+ slab_bufctl(slabp)[i-1] = BUFCTL_END;
+ slabp->free = 0;
+}
+
+/*
+ * Grow (by 1) the number of slabs within a cache. This is called by
+ * kmem_cache_alloc() when there are no active objs left in a cache.
+ */
+static int kmem_cache_grow (kmem_cache_t * cachep, int flags)
+{
+ slab_t *slabp;
+ struct page *page;
+ void *objp;
+ size_t offset;
+ unsigned int i, local_flags;
+ unsigned long ctor_flags;
+ unsigned long save_flags;
+
+ /* Be lazy and only check for valid flags here,
+ * keeping it out of the critical path in kmem_cache_alloc().
+ */
+ if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
+ BUG();
+ if (flags & SLAB_NO_GROW)
+ return 0;
+
+ /*
+ * The test for missing atomic flag is performed here, rather than
+ * the more obvious place, simply to reduce the critical path length
+ * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
+ * will eventually be caught here (where it matters).
+ */
+ if (in_interrupt() && (flags & SLAB_LEVEL_MASK) != SLAB_ATOMIC)
+ BUG();
+
+ ctor_flags = SLAB_CTOR_CONSTRUCTOR;
+ local_flags = (flags & SLAB_LEVEL_MASK);
+ if (local_flags == SLAB_ATOMIC)
+ /*
+ * Not allowed to sleep. Need to tell a constructor about
+ * this - it might need to know...
+ */
+ ctor_flags |= SLAB_CTOR_ATOMIC;
+
+ /* About to mess with non-constant members - lock. */
+ spin_lock_irqsave(&cachep->spinlock, save_flags);
+
+ /* Get the colour for the slab, and calculate the next value. */
+ offset = cachep->colour_next;
+ cachep->colour_next++;
+ if (cachep->colour_next >= cachep->colour)
+ cachep->colour_next = 0;
+ offset *= cachep->colour_off;
+ cachep->dflags |= DFLGS_GROWN;
+
+ cachep->growing++;
+ spin_unlock_irqrestore(&cachep->spinlock, save_flags);
+
+ /* A series of memory allocations for a new slab.
+ * Neither the cache-chain semaphore nor the cache-lock is
+ * held, but the incremented c_growing prevents this
+ * cache from being reaped or shrunk.
+ * Note: The cache could be selected for reaping in
+ * kmem_cache_reap(), but when the final test is made the
+ * growing value will be seen.
+ */
+
+ /* Get mem for the objs. */
+ if (!(objp = kmem_getpages(cachep, flags)))
+ goto failed;
+
+ /* Get slab management. */
+ if (!(slabp = kmem_cache_slabmgmt(cachep, objp, offset, local_flags)))
+ goto opps1;
+
+ /* Nasty!!!!!! I hope this is OK. */
+ i = 1 << cachep->gfporder;
+ page = virt_to_page(objp);
+ do {
+ SET_PAGE_CACHE(page, cachep);
+ SET_PAGE_SLAB(page, slabp);
+ PageSetSlab(page);
+ page++;
+ } while (--i);
+
+ kmem_cache_init_objs(cachep, slabp, ctor_flags);
+
+ spin_lock_irqsave(&cachep->spinlock, save_flags);
+ cachep->growing--;
+
+ /* Make slab active. */
+ list_add_tail(&slabp->list, &cachep->slabs_free);
+ STATS_INC_GROWN(cachep);
+ cachep->failures = 0;
+
+ spin_unlock_irqrestore(&cachep->spinlock, save_flags);
+ return 1;
+opps1:
+ kmem_freepages(cachep, objp);
+failed:
+ spin_lock_irqsave(&cachep->spinlock, save_flags);
+ cachep->growing--;
+ spin_unlock_irqrestore(&cachep->spinlock, save_flags);
+ return 0;
+}
+
+/*
+ * Perform extra freeing checks:
+ * - detect double free
+ * - detect bad pointers.
+ * Called with the cache-lock held.
+ */
+
+#if DEBUG
+static int kmem_extra_free_checks (kmem_cache_t * cachep,
+ slab_t *slabp, void * objp)
+{
+ int i;
+ unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;
+
+ if (objnr >= cachep->num)
+ BUG();
+ if (objp != slabp->s_mem + objnr*cachep->objsize)
+ BUG();
+
+ /* Check slab's freelist to see if this obj is there. */
+ for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
+ if (i == objnr)
+ BUG();
+ }
+ return 0;
+}
+#endif
+
+static inline void kmem_cache_alloc_head(kmem_cache_t *cachep, int flags)
+{
+ if (flags & SLAB_DMA) {
+ if (!(cachep->gfpflags & GFP_DMA))
+ BUG();
+ } else {
+ if (cachep->gfpflags & GFP_DMA)
+ BUG();
+ }
+}
+
+static inline void * kmem_cache_alloc_one_tail (kmem_cache_t *cachep,
+ slab_t *slabp)
+{
+ void *objp;
+
+ STATS_INC_ALLOCED(cachep);
+ STATS_INC_ACTIVE(cachep);
+ STATS_SET_HIGH(cachep);
+
+ /* get obj pointer */
+ slabp->inuse++;
+ objp = slabp->s_mem + slabp->free*cachep->objsize;
+ slabp->free=slab_bufctl(slabp)[slabp->free];
+
+ if (unlikely(slabp->free == BUFCTL_END)) {
+ list_del(&slabp->list);
+ list_add(&slabp->list, &cachep->slabs_full);
+ }
+#if DEBUG
+ if (cachep->flags & SLAB_POISON)
+ if (kmem_check_poison_obj(cachep, objp))
+ BUG();
+ if (cachep->flags & SLAB_RED_ZONE) {
+ /* Set alloc red-zone, and check old one. */
+ if (xchg((unsigned long *)objp, RED_MAGIC2) !=
+ RED_MAGIC1)
+ BUG();
+ if (xchg((unsigned long *)(objp+cachep->objsize -
+ BYTES_PER_WORD), RED_MAGIC2) != RED_MAGIC1)
+ BUG();
+ objp += BYTES_PER_WORD;
+ }
+#endif
+ return objp;
+}
+
+/*
+ * Returns a ptr to an obj in the given cache.
+ * caller must guarantee synchronization
+ * #define for the goto optimization 8-)
+ */
+#define kmem_cache_alloc_one(cachep) \
+({ \
+ struct list_head * slabs_partial, * entry; \
+ slab_t *slabp; \
+ \
+ slabs_partial = &(cachep)->slabs_partial; \
+ entry = slabs_partial->next; \
+ if (unlikely(entry == slabs_partial)) { \
+ struct list_head * slabs_free; \
+ slabs_free = &(cachep)->slabs_free; \
+ entry = slabs_free->next; \
+ if (unlikely(entry == slabs_free)) \
+ goto alloc_new_slab; \
+ list_del(entry); \
+ list_add(entry, slabs_partial); \
+ } \
+ \
+ slabp = list_entry(entry, slab_t, list); \
+ kmem_cache_alloc_one_tail(cachep, slabp); \
+})
+
+#ifdef CONFIG_SMP
+void* kmem_cache_alloc_batch(kmem_cache_t* cachep, cpucache_t* cc, int flags)
+{
+ int batchcount = cachep->batchcount;
+
+ spin_lock(&cachep->spinlock);
+ while (batchcount--) {
+ struct list_head * slabs_partial, * entry;
+ slab_t *slabp;
+ /* Get the slab the allocation is to come from. */
+ slabs_partial = &(cachep)->slabs_partial;
+ entry = slabs_partial->next;
+ if (unlikely(entry == slabs_partial)) {
+ struct list_head * slabs_free;
+ slabs_free = &(cachep)->slabs_free;
+ entry = slabs_free->next;
+ if (unlikely(entry == slabs_free))
+ break;
+ list_del(entry);
+ list_add(entry, slabs_partial);
+ }
+
+ slabp = list_entry(entry, slab_t, list);
+ cc_entry(cc)[cc->avail++] =
+ kmem_cache_alloc_one_tail(cachep, slabp);
+ }
+ spin_unlock(&cachep->spinlock);
+
+ if (cc->avail)
+ return cc_entry(cc)[--cc->avail];
+ return NULL;
+}
+#endif
+
+static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
+{
+ unsigned long save_flags;
+ void* objp;
+
+ kmem_cache_alloc_head(cachep, flags);
+try_again:
+ local_irq_save(save_flags);
+#ifdef CONFIG_SMP
+ {
+ cpucache_t *cc = cc_data(cachep);
+
+ if (cc) {
+ if (cc->avail) {
+ STATS_INC_ALLOCHIT(cachep);
+ objp = cc_entry(cc)[--cc->avail];
+ } else {
+ STATS_INC_ALLOCMISS(cachep);
+ objp = kmem_cache_alloc_batch(cachep,cc,flags);
+ if (!objp)
+ goto alloc_new_slab_nolock;
+ }
+ } else {
+ spin_lock(&cachep->spinlock);
+ objp = kmem_cache_alloc_one(cachep);
+ spin_unlock(&cachep->spinlock);
+ }
+ }
+#else
+ objp = kmem_cache_alloc_one(cachep);
+#endif
+ local_irq_restore(save_flags);
+ return objp;
+alloc_new_slab:
+#ifdef CONFIG_SMP
+ spin_unlock(&cachep->spinlock);
+alloc_new_slab_nolock:
+#endif
+ local_irq_restore(save_flags);
+ if (kmem_cache_grow(cachep, flags))
+ /* Someone may have stolen our objs. Doesn't matter, we'll
+ * just come back here again.
+ */
+ goto try_again;
+ return NULL;
+}
+
+/*
+ * Release an obj back to its cache. If the obj has a constructed
+ * state, it should be in this state _before_ it is released.
+ * - caller is responsible for the synchronization
+ */
+
+#if DEBUG
+# define CHECK_NR(pg) \
+ do { \
+ if (!VALID_PAGE(pg)) { \
+ printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
+ (unsigned long)objp); \
+ BUG(); \
+ } \
+ } while (0)
+# define CHECK_PAGE(page) \
+ do { \
+ CHECK_NR(page); \
+ if (!PageSlab(page)) { \
+ printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
+ (unsigned long)objp); \
+ BUG(); \
+ } \
+ } while (0)
+
+#else
+# define CHECK_PAGE(pg) do { } while (0)
+#endif
+
+static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
+{
+ slab_t* slabp;
+
+ CHECK_PAGE(virt_to_page(objp));
+ /* reduces memory footprint
+ *
+ if (OPTIMIZE(cachep))
+ slabp = (void*)((unsigned long)objp&(~(PAGE_SIZE-1)));
+ else
+ */
+ slabp = GET_PAGE_SLAB(virt_to_page(objp));
+
+#if DEBUG
+ if (cachep->flags & SLAB_DEBUG_INITIAL)
+ /* Need to call the slab's constructor so the
+ * caller can perform a verify of its state (debugging).
+ * Called without the cache-lock held.
+ */
+ cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY);
+
+ if (cachep->flags & SLAB_RED_ZONE) {
+ objp -= BYTES_PER_WORD;
+ if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2)
+ /* Either write before start, or a double free. */
+ BUG();
+ if (xchg((unsigned long *)(objp+cachep->objsize -
+ BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2)
+ /* Either write past end, or a double free. */
+ BUG();
+ }
+ if (cachep->flags & SLAB_POISON)
+ kmem_poison_obj(cachep, objp);
+ if (kmem_extra_free_checks(cachep, slabp, objp))
+ return;
+#endif
+ {
+ unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;
+
+ slab_bufctl(slabp)[objnr] = slabp->free;
+ slabp->free = objnr;
+ }
+ STATS_DEC_ACTIVE(cachep);
+
+ /* fixup slab chains */
+ {
+ int inuse = slabp->inuse;
+ if (unlikely(!--slabp->inuse)) {
+ /* Was partial or full, now empty. */
+ list_del(&slabp->list);
+ list_add(&slabp->list, &cachep->slabs_free);
+ } else if (unlikely(inuse == cachep->num)) {
+ /* Was full. */
+ list_del(&slabp->list);
+ list_add(&slabp->list, &cachep->slabs_partial);
+ }
+ }
+}
+
+#ifdef CONFIG_SMP
+static inline void __free_block (kmem_cache_t* cachep,
+ void** objpp, int len)
+{
+ for ( ; len > 0; len--, objpp++)
+ kmem_cache_free_one(cachep, *objpp);
+}
+
+static void free_block (kmem_cache_t* cachep, void** objpp, int len)
+{
+ spin_lock(&cachep->spinlock);
+ __free_block(cachep, objpp, len);
+ spin_unlock(&cachep->spinlock);
+}
+#endif
+
+/*
+ * __kmem_cache_free
+ * called with disabled ints
+ */
+static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
+{
+#ifdef CONFIG_SMP
+ cpucache_t *cc = cc_data(cachep);
+
+ CHECK_PAGE(virt_to_page(objp));
+ if (cc) {
+ int batchcount;
+ if (cc->avail < cc->limit) {
+ STATS_INC_FREEHIT(cachep);
+ cc_entry(cc)[cc->avail++] = objp;
+ return;
+ }
+ STATS_INC_FREEMISS(cachep);
+ batchcount = cachep->batchcount;
+ cc->avail -= batchcount;
+ free_block(cachep,
+ &cc_entry(cc)[cc->avail],batchcount);
+ cc_entry(cc)[cc->avail++] = objp;
+ return;
+ } else {
+ free_block(cachep, &objp, 1);
+ }
+#else
+ kmem_cache_free_one(cachep, objp);
+#endif
+}
+
+/**
+ * kmem_cache_alloc - Allocate an object
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache. The flags are only relevant
+ * if the cache has no available objects.
+ */
+void * kmem_cache_alloc (kmem_cache_t *cachep, int flags)
+{
+ return __kmem_cache_alloc(cachep, flags);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel.
+ *
+ * The @flags argument may be one of:
+ *
+ * %GFP_USER - Allocate memory on behalf of user. May sleep.
+ *
+ * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
+ *
+ * %GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers.
+ *
+ * Additionally, the %GFP_DMA flag may be set to indicate the memory
+ * must be suitable for DMA. This can mean different things on different
+ * platforms. For example, on i386, it means that the memory must come
+ * from the first 16MB.
+ */
+void * kmalloc (size_t size, int flags)
+{
+ cache_sizes_t *csizep = cache_sizes;
+
+#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
+ if (size >= PAGE_SIZE) {
+ unsigned long addr;
+ addr = __get_contiguous_pages(flags, (size+PAGE_SIZE-1)/PAGE_SIZE, 0);
+ return (void *)addr;
+ }
+#endif
+
+ for (; csizep->cs_size; csizep++) {
+ if (size > csizep->cs_size)
+ continue;
+ return __kmem_cache_alloc(flags & GFP_DMA ?
+ csizep->cs_dmacachep : csizep->cs_cachep, flags);
+ }
+ return NULL;
+}
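+
+/*
+ * Editor's note: illustration only, not part of the original source.
+ * Typical kmalloc()/kfree() usage ("struct foo" is a hypothetical type).
+ * GFP_KERNEL may sleep, so GFP_ATOMIC must be used from interrupt
+ * context, as documented above:
+ *
+ *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
+ *	if (!f)
+ *		return -ENOMEM;
+ *	memset(f, 0, sizeof(*f));
+ *	...
+ *	kfree(f);
+ */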
+
+/**
+ * kmem_cache_free - Deallocate an object
+ * @cachep: The cache the allocation was from.
+ * @objp: The previously allocated object.
+ *
+ * Free an object which was previously allocated from this
+ * cache.
+ */
+void kmem_cache_free (kmem_cache_t *cachep, void *objp)
+{
+ unsigned long flags;
+#if DEBUG
+ CHECK_PAGE(virt_to_page(objp));
+ if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
+ BUG();
+#endif
+
+ local_irq_save(flags);
+ __kmem_cache_free(cachep, objp);
+ local_irq_restore(flags);
+}
+
+/**
+ * kfree - free previously allocated memory
+ * @objp: pointer returned by kmalloc.
+ *
+ * Don't free memory not originally allocated by kmalloc()
+ * or you will run into trouble.
+ */
+void kfree (const void *objp)
+{
+ kmem_cache_t *c;
+ unsigned long flags;
+ struct page *p = virt_to_page(objp);
+
+ if (!objp)
+ return;
+
+ local_irq_save(flags);
+#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
+ if (!PageSlab(p)) {
+ if (objp != page_address(p))
+ BUG();
+ if ((p->index & ~0xffff) == 0xa1c20000)
+ free_contiguous_pages((unsigned long)objp,
+ p->index & 0xffff);
+ else
+ BUG();
+ local_irq_restore(flags);
+ return;
+ }
+#endif
+ CHECK_PAGE(p);
+ c = GET_PAGE_CACHE(p);
+ __kmem_cache_free(c, (void*)objp);
+ local_irq_restore(flags);
+}
+
+unsigned int kmem_cache_size(kmem_cache_t *cachep)
+{
+#if DEBUG
+ if (cachep->flags & SLAB_RED_ZONE)
+ return (cachep->objsize - 2*BYTES_PER_WORD);
+#endif
+ return cachep->objsize;
+}
+
+kmem_cache_t * kmem_find_general_cachep (size_t size, int gfpflags)
+{
+ cache_sizes_t *csizep = cache_sizes;
+
+ /* This function could be moved to the header file, and
+ * made inline so consumers can quickly determine what
+ * cache pointer they require.
+ */
+ for ( ; csizep->cs_size; csizep++) {
+ if (size > csizep->cs_size)
+ continue;
+ break;
+ }
+ return (gfpflags & GFP_DMA) ? csizep->cs_dmacachep : csizep->cs_cachep;
+}
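+
+/*
+ * Editor's note: illustration only, not part of the original source.
+ * kmem_find_general_cachep() maps a size onto one of the general
+ * "size-N" caches; a caller doing many fixed-size allocations could
+ * look the cache up once and then allocate from it directly:
+ *
+ *	kmem_cache_t *cp = kmem_find_general_cachep(192, 0);
+ *	void *buf = kmem_cache_alloc(cp, SLAB_KERNEL);
+ *	...
+ *	kmem_cache_free(cp, buf);
+ */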
+
+#ifdef CONFIG_SMP
+
+/* called with cache_chain_sem acquired. */
+static int kmem_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount)
+{
+ ccupdate_struct_t new;
+ int i;
+
+ /*
+ * These are admin-provided, so we are more graceful.
+ */
+ if (limit < 0)
+ return -EINVAL;
+ if (batchcount < 0)
+ return -EINVAL;
+ if (batchcount > limit)
+ return -EINVAL;
+ if (limit != 0 && !batchcount)
+ return -EINVAL;
+
+ memset(&new.new,0,sizeof(new.new));
+ if (limit) {
+ for (i = 0; i< smp_num_cpus; i++) {
+ cpucache_t* ccnew;
+
+ ccnew = kmalloc(sizeof(void*)*limit+
+ sizeof(cpucache_t), GFP_KERNEL);
+ if (!ccnew)
+ goto oom;
+ ccnew->limit = limit;
+ ccnew->avail = 0;
+ new.new[cpu_logical_map(i)] = ccnew;
+ }
+ }
+ new.cachep = cachep;
+ spin_lock_irq(&cachep->spinlock);
+ cachep->batchcount = batchcount;
+ spin_unlock_irq(&cachep->spinlock);
+
+ smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
+
+ for (i = 0; i < smp_num_cpus; i++) {
+ cpucache_t* ccold = new.new[cpu_logical_map(i)];
+ if (!ccold)
+ continue;
+ local_irq_disable();
+ free_block(cachep, cc_entry(ccold), ccold->avail);
+ local_irq_enable();
+ kfree(ccold);
+ }
+ return 0;
+oom:
+ for (i--; i >= 0; i--)
+ kfree(new.new[cpu_logical_map(i)]);
+ return -ENOMEM;
+}
+
+static void enable_cpucache (kmem_cache_t *cachep)
+{
+ int err;
+ int limit;
+
+ /* FIXME: optimize */
+ if (cachep->objsize > PAGE_SIZE)
+ return;
+ if (cachep->objsize > 1024)
+ limit = 60;
+ else if (cachep->objsize > 256)
+ limit = 124;
+ else
+ limit = 252;
+
+ err = kmem_tune_cpucache(cachep, limit, limit/2);
+ if (err)
+ printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
+ cachep->name, -err);
+}
+
+static void enable_all_cpucaches (void)
+{
+ struct list_head* p;
+
+ down(&cache_chain_sem);
+
+ p = &cache_cache.next;
+ do {
+ kmem_cache_t* cachep = list_entry(p, kmem_cache_t, next);
+
+ enable_cpucache(cachep);
+ p = cachep->next.next;
+ } while (p != &cache_cache.next);
+
+ up(&cache_chain_sem);
+}
+#endif
+
+/**
+ * kmem_cache_reap - Reclaim memory from caches.
+ * @gfp_mask: the type of memory required.
+ *
+ * Called from do_try_to_free_pages() and __alloc_pages()
+ */
+int kmem_cache_reap (int gfp_mask)
+{
+ slab_t *slabp;
+ kmem_cache_t *searchp;
+ kmem_cache_t *best_cachep;
+ unsigned int best_pages;
+ unsigned int best_len;
+ unsigned int scan;
+ int ret = 0;
+
+ if (gfp_mask & __GFP_WAIT)
+ down(&cache_chain_sem);
+ else
+ if (down_trylock(&cache_chain_sem))
+ return 0;
+
+ scan = REAP_SCANLEN;
+ best_len = 0;
+ best_pages = 0;
+ best_cachep = NULL;
+ searchp = clock_searchp;
+ do {
+ unsigned int pages;
+ struct list_head* p;
+ unsigned int full_free;
+
+ /* It's safe to test this without holding the cache-lock. */
+ if (searchp->flags & SLAB_NO_REAP)
+ goto next;
+ spin_lock_irq(&searchp->spinlock);
+ if (searchp->growing)
+ goto next_unlock;
+ if (searchp->dflags & DFLGS_GROWN) {
+ searchp->dflags &= ~DFLGS_GROWN;
+ goto next_unlock;
+ }
+#ifdef CONFIG_SMP
+ {
+ cpucache_t *cc = cc_data(searchp);
+ if (cc && cc->avail) {
+ __free_block(searchp, cc_entry(cc), cc->avail);
+ cc->avail = 0;
+ }
+ }
+#endif
+
+ full_free = 0;
+ p = searchp->slabs_free.next;
+ while (p != &searchp->slabs_free) {
+ slabp = list_entry(p, slab_t, list);
+#if DEBUG
+ if (slabp->inuse)
+ BUG();
+#endif
+ full_free++;
+ p = p->next;
+ }
+
+ /*
+ * Try to avoid slabs with constructors and/or
+ * more than one page per slab (as it can be difficult
+ * to get high orders from gfp()).
+ */
+ pages = full_free * (1<<searchp->gfporder);
+ if (searchp->ctor)
+ pages = (pages*4+1)/5;
+ if (searchp->gfporder)
+ pages = (pages*4+1)/5;
+ if (pages > best_pages) {
+ best_cachep = searchp;
+ best_len = full_free;
+ best_pages = pages;
+ if (pages >= REAP_PERFECT) {
+ clock_searchp = list_entry(searchp->next.next,
+ kmem_cache_t,next);
+ goto perfect;
+ }
+ }
+next_unlock:
+ spin_unlock_irq(&searchp->spinlock);
+next:
+ searchp = list_entry(searchp->next.next,kmem_cache_t,next);
+ } while (--scan && searchp != clock_searchp);
+
+ clock_searchp = searchp;
+
+ if (!best_cachep)
+ /* couldn't find anything to reap */
+ goto out;
+
+ spin_lock_irq(&best_cachep->spinlock);
+perfect:
+ /* free only 50% of the free slabs */
+ best_len = (best_len + 1)/2;
+ for (scan = 0; scan < best_len; scan++) {
+ struct list_head *p;
+
+ if (best_cachep->growing)
+ break;
+ p = best_cachep->slabs_free.prev;
+ if (p == &best_cachep->slabs_free)
+ break;
+ slabp = list_entry(p,slab_t,list);
+#if DEBUG
+ if (slabp->inuse)
+ BUG();
+#endif
+ list_del(&slabp->list);
+ STATS_INC_REAPED(best_cachep);
+
+ /* Safe to drop the lock. The slab is no longer linked to the
+ * cache.
+ */
+ spin_unlock_irq(&best_cachep->spinlock);
+ kmem_slab_destroy(best_cachep, slabp);
+ spin_lock_irq(&best_cachep->spinlock);
+ }
+ spin_unlock_irq(&best_cachep->spinlock);
+ ret = scan * (1 << best_cachep->gfporder);
+out:
+ up(&cache_chain_sem);
+ return ret;
+}
+
+#ifdef CONFIG_PROC_FS
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t n = *pos;
+ struct list_head *p;
+
+ down(&cache_chain_sem);
+ if (!n)
+ return (void *)1;
+ p = &cache_cache.next;
+ while (--n) {
+ p = p->next;
+ if (p == &cache_cache.next)
+ return NULL;
+ }
+ return list_entry(p, kmem_cache_t, next);
+}
+
+static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ kmem_cache_t *cachep = p;
+ ++*pos;
+ if (p == (void *)1)
+ return &cache_cache;
+ cachep = list_entry(cachep->next.next, kmem_cache_t, next);
+ return cachep == &cache_cache ? NULL : cachep;
+}
+
+static void s_stop(struct seq_file *m, void *p)
+{
+ up(&cache_chain_sem);
+}
+
+static int s_show(struct seq_file *m, void *p)
+{
+ kmem_cache_t *cachep = p;
+ struct list_head *q;
+ slab_t *slabp;
+ unsigned long active_objs;
+ unsigned long num_objs;
+ unsigned long active_slabs = 0;
+ unsigned long num_slabs;
+ const char *name;
+
+ if (p == (void*)1) {
+ /*
+ * Output format version, so at least we can change it
+ * without _too_ many complaints.
+ */
+ seq_puts(m, "slabinfo - version: 1.1"
+#if STATS
+ " (statistics)"
+#endif
+#ifdef CONFIG_SMP
+ " (SMP)"
+#endif
+ "\n");
+ return 0;
+ }
+
+ spin_lock_irq(&cachep->spinlock);
+ active_objs = 0;
+ num_slabs = 0;
+ list_for_each(q,&cachep->slabs_full) {
+ slabp = list_entry(q, slab_t, list);
+ if (slabp->inuse != cachep->num)
+ BUG();
+ active_objs += cachep->num;
+ active_slabs++;
+ }
+ list_for_each(q,&cachep->slabs_partial) {
+ slabp = list_entry(q, slab_t, list);
+ if (slabp->inuse == cachep->num || !slabp->inuse)
+ BUG();
+ active_objs += slabp->inuse;
+ active_slabs++;
+ }
+ list_for_each(q,&cachep->slabs_free) {
+ slabp = list_entry(q, slab_t, list);
+ if (slabp->inuse)
+ BUG();
+ num_slabs++;
+ }
+ num_slabs+=active_slabs;
+ num_objs = num_slabs*cachep->num;
+
+ name = cachep->name;
+ {
+ char tmp;
+ if (__get_user(tmp, name))
+ name = "broken";
+ }
+
+ seq_printf(m, "%-17s %6lu %6lu %6u %4lu %4lu %4u",
+ name, active_objs, num_objs, cachep->objsize,
+ active_slabs, num_slabs, (1<<cachep->gfporder));
+
+#if STATS
+ {
+ unsigned long errors = cachep->errors;
+ unsigned long high = cachep->high_mark;
+ unsigned long grown = cachep->grown;
+ unsigned long reaped = cachep->reaped;
+ unsigned long allocs = cachep->num_allocations;
+
+ seq_printf(m, " : %6lu %7lu %5lu %4lu %4lu",
+ high, allocs, grown, reaped, errors);
+ }
+#endif
+#ifdef CONFIG_SMP
+ {
+ cpucache_t *cc = cc_data(cachep);
+ unsigned int batchcount = cachep->batchcount;
+ unsigned int limit;
+
+ if (cc)
+ limit = cc->limit;
+ else
+ limit = 0;
+ seq_printf(m, " : %4u %4u",
+ limit, batchcount);
+ }
+#endif
+#if STATS && defined(CONFIG_SMP)
+ {
+ unsigned long allochit = atomic_read(&cachep->allochit);
+ unsigned long allocmiss = atomic_read(&cachep->allocmiss);
+ unsigned long freehit = atomic_read(&cachep->freehit);
+ unsigned long freemiss = atomic_read(&cachep->freemiss);
+ seq_printf(m, " : %6lu %6lu %6lu %6lu",
+ allochit, allocmiss, freehit, freemiss);
+ }
+#endif
+ spin_unlock_irq(&cachep->spinlock);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+/**
+ * slabinfo_op - iterator that generates /proc/slabinfo
+ *
+ * Output layout:
+ * cache-name
+ * num-active-objs
+ * total-objs
+ * object size
+ * num-active-slabs
+ * total-slabs
+ * num-pages-per-slab
+ * + further values on SMP and with statistics enabled
+ */
+
+struct seq_operations slabinfo_op = {
+ start: s_start,
+ next: s_next,
+ stop: s_stop,
+ show: s_show
+};
+
+#define MAX_SLABINFO_WRITE 128
+/**
+ * slabinfo_write - SMP tuning for the slab allocator
+ * @file: unused
+ * @buffer: user buffer
+ * @count: data len
+ * @data: unused
+ */
+ssize_t slabinfo_write(struct file *file, const char *buffer,
+ size_t count, loff_t *ppos)
+{
+#ifdef CONFIG_SMP
+ char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
+ int limit, batchcount, res;
+ struct list_head *p;
+
+ if (count > MAX_SLABINFO_WRITE)
+ return -EINVAL;
+ if (copy_from_user(&kbuf, buffer, count))
+ return -EFAULT;
+ kbuf[MAX_SLABINFO_WRITE] = '\0';
+
+ tmp = strchr(kbuf, ' ');
+ if (!tmp)
+ return -EINVAL;
+ *tmp = '\0';
+ tmp++;
+ limit = simple_strtol(tmp, &tmp, 10);
+ while (*tmp == ' ')
+ tmp++;
+ batchcount = simple_strtol(tmp, &tmp, 10);
+
+ /* Find the cache in the chain of caches. */
+ down(&cache_chain_sem);
+ res = -EINVAL;
+ list_for_each(p,&cache_chain) {
+ kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
+
+ if (!strcmp(cachep->name, kbuf)) {
+ res = kmem_tune_cpucache(cachep, limit, batchcount);
+ break;
+ }
+ }
+ up(&cache_chain_sem);
+ if (res >= 0)
+ res = count;
+ return res;
+#else
+ return -EINVAL;
+#endif
+}
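+
+/*
+ * Editor's note: illustration only, not part of the original source.
+ * The buffer written to /proc/slabinfo is parsed above as
+ * "<cache-name> <limit> <batchcount>", so on an SMP kernel the
+ * per-CPU caches can be tuned from a shell, e.g.:
+ *
+ *	echo "size-4096 120 60" > /proc/slabinfo
+ *
+ * ("size-4096" is just an example cache name.)
+ */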
+#endif
+
+
+#ifdef NO_MM
+/*
+ * return the total memory allocated for this pointer, not
+ * just what the caller asked for
+ */
+
+size_t
+ksize(const void *objp)
+{
+ struct page *page;
+ kmem_cache_t *c;
+ unsigned long flags;
+ int size;
+
+ if (!objp)
+ return(0);
+ local_irq_save(flags);
+ size = MAP_NR(objp);
+ if (size < 0 || size >= max_mapnr) {
+ /* don't leave interrupts disabled on this error path */
+ local_irq_restore(flags);
+ return(0);
+ }
+ page = &mem_map[size];
+
+ if (!PageSlab(page)) { /* not allocated with kmalloc */
+#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
+ if ((page->index & ~0xffff) == 0xa1c20000)
+ size = (page->index & 0xffff) * PAGE_SIZE;
+ else
+#endif
+ {
+ if (page->index < 0 || page->index >= MAX_ORDER)
+ printk("ksize on unknown page type (index=%ld)!\n",
+ page->index);
+ size = PAGE_SIZE << page->index;
+ }
+ } else {
+ CHECK_PAGE(page);
+ c = GET_PAGE_CACHE(page);
+ size = c->objsize;
+ }
+ local_irq_restore(flags);
+ return size;
+}
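+
+/*
+ * Editor's note: illustration only, not part of the original source.
+ * On NO_MM builds where this ksize() is compiled in, kmalloc() rounds
+ * a request up to the next general cache size, so ksize() may report
+ * more than was asked for:
+ *
+ *	void *p = kmalloc(100, GFP_KERNEL);
+ *	if (p)
+ *		printk(KERN_DEBUG "asked for 100, got %lu bytes\n",
+ *			(unsigned long) ksize(p));
+ */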
+
+#endif
diff --git a/uClinux-2.4.20-uc1/mmnommu/swap.c b/uClinux-2.4.20-uc1/mmnommu/swap.c
new file mode 100644
index 0000000..4951765
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/swap.c
@@ -0,0 +1,113 @@
+/*
+ * linux/mm/swap.c
+ *
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ */
+
+/*
+ * This file contains the default values for the operation of the
+ * Linux VM subsystem. Fine-tuning documentation can be found in
+ * linux/Documentation/sysctl/vm.txt.
+ * Started 18.12.91
+ * Swap aging added 23.2.95, Stephen Tweedie.
+ * Buffermem limits added 12.3.98, Rik van Riel.
+ */
+
+#include <linux/mm.h>
+#include <linux/kernel_stat.h>
+#include <linux/swap.h>
+#include <linux/swapctl.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+
+#include <asm/dma.h>
+#include <asm/uaccess.h> /* for copy_to/from_user */
+#include <asm/pgtable.h>
+
+/* How many pages do we try to swap or page in/out together? */
+int page_cluster;
+
+pager_daemon_t pager_daemon = {
+ 512, /* base number for calculating the number of tries */
+ SWAP_CLUSTER_MAX, /* minimum number of tries */
+ 8, /* do swap I/O in clusters of this size */
+};
+
+/*
+ * Move an inactive page to the active list.
+ */
+static inline void activate_page_nolock(struct page * page)
+{
+ if (PageLRU(page) && !PageActive(page)) {
+ del_page_from_inactive_list(page);
+ add_page_to_active_list(page);
+ }
+}
+
+void activate_page(struct page * page)
+{
+ spin_lock(&pagemap_lru_lock);
+ activate_page_nolock(page);
+ spin_unlock(&pagemap_lru_lock);
+}
+
+/**
+ * lru_cache_add: add a page to the page lists
+ * @page: the page to add
+ */
+void lru_cache_add(struct page * page)
+{
+ if (!PageLRU(page)) {
+ spin_lock(&pagemap_lru_lock);
+ if (!TestSetPageLRU(page))
+ add_page_to_inactive_list(page);
+ spin_unlock(&pagemap_lru_lock);
+ }
+}
+
+/**
+ * __lru_cache_del: remove a page from the page lists
+ * @page: the page to remove
+ *
+ * This function is for when the caller already holds
+ * the pagemap_lru_lock.
+ */
+void __lru_cache_del(struct page * page)
+{
+ if (TestClearPageLRU(page)) {
+ if (PageActive(page)) {
+ del_page_from_active_list(page);
+ } else {
+ del_page_from_inactive_list(page);
+ }
+ }
+}
+
+/**
+ * lru_cache_del: remove a page from the page lists
+ * @page: the page to remove
+ */
+void lru_cache_del(struct page * page)
+{
+ spin_lock(&pagemap_lru_lock);
+ __lru_cache_del(page);
+ spin_unlock(&pagemap_lru_lock);
+}
+
+/*
+ * Perform any setup for the swap system
+ */
+void __init swap_setup(void)
+{
+ unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);
+
+ /* Use a smaller cluster for small-memory machines */
+ if (megs < 16)
+ page_cluster = 2;
+ else
+ page_cluster = 3;
+ /*
+ * Right now other parts of the system mean that we
+ * _really_ don't want to cluster much more
+ */
+}
diff --git a/uClinux-2.4.20-uc1/mmnommu/swap_state.c b/uClinux-2.4.20-uc1/mmnommu/swap_state.c
new file mode 100644
index 0000000..1633b61
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/swap_state.c
@@ -0,0 +1,266 @@
+/*
+ * linux/mm/swap_state.c
+ *
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ * Swap reorganised 29.12.95, Stephen Tweedie
+ *
+ * Rewritten to use page cache, (C) 1998 Stephen Tweedie
+ *
+ * NO_MM - Copyright (c) 2000 Lineo,Inc. David McCullough <davidm@lineo.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/kernel_stat.h>
+#include <linux/swap.h>
+#include <linux/swapctl.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+
+#include <asm/pgtable.h>
+
+#ifndef NO_MM
+
+/*
+ * We may have stale swap cache pages in memory: notice
+ * them here and get rid of the unnecessary final write.
+ */
+static int swap_writepage(struct page *page)
+{
+ if (remove_exclusive_swap_page(page)) {
+ UnlockPage(page);
+ return 0;
+ }
+ rw_swap_page(WRITE, page);
+ return 0;
+}
+
+static struct address_space_operations swap_aops = {
+ writepage: swap_writepage,
+ sync_page: block_sync_page,
+};
+
+#endif /* NO_MM */
+
+struct address_space swapper_space = {
+ LIST_HEAD_INIT(swapper_space.clean_pages),
+ LIST_HEAD_INIT(swapper_space.dirty_pages),
+ LIST_HEAD_INIT(swapper_space.locked_pages),
+ 0, /* nrpages */
+#ifndef NO_MM
+ &swap_aops,
+#else
+ NULL,
+#endif
+};
+
+#ifndef NO_MM
+
+#ifdef SWAP_CACHE_INFO
+#define INC_CACHE_INFO(x) (swap_cache_info.x++)
+
+static struct {
+ unsigned long add_total;
+ unsigned long del_total;
+ unsigned long find_success;
+ unsigned long find_total;
+ unsigned long noent_race;
+ unsigned long exist_race;
+} swap_cache_info;
+
+void show_swap_cache_info(void)
+{
+ printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
+ swap_cache_info.add_total, swap_cache_info.del_total,
+ swap_cache_info.find_success, swap_cache_info.find_total,
+ swap_cache_info.noent_race, swap_cache_info.exist_race);
+}
+#else
+#define INC_CACHE_INFO(x) do { } while (0)
+#endif
+
+int add_to_swap_cache(struct page *page, swp_entry_t entry)
+{
+ if (page->mapping)
+ BUG();
+ if (!swap_duplicate(entry)) {
+ INC_CACHE_INFO(noent_race);
+ return -ENOENT;
+ }
+ if (add_to_page_cache_unique(page, &swapper_space, entry.val,
+ page_hash(&swapper_space, entry.val)) != 0) {
+ swap_free(entry);
+ INC_CACHE_INFO(exist_race);
+ return -EEXIST;
+ }
+ if (!PageLocked(page))
+ BUG();
+ if (!PageSwapCache(page))
+ BUG();
+ INC_CACHE_INFO(add_total);
+ return 0;
+}
+
+#endif /* NO_MM */
+
+/*
+ * This must be called only on pages that have
+ * been verified to be in the swap cache.
+ */
+void __delete_from_swap_cache(struct page *page)
+{
+#ifndef NO_MM
+ if (!PageLocked(page))
+ BUG();
+ if (!PageSwapCache(page))
+ BUG();
+ ClearPageDirty(page);
+ __remove_inode_page(page);
+ INC_CACHE_INFO(del_total);
+#else
+ BUG();
+#endif /* NO_MM */
+}
+
+/*
+ * This must be called only on pages that have
+ * been verified to be in the swap cache and locked.
+ * It will never put the page into the free list,
+ * the caller has a reference on the page.
+ */
+void delete_from_swap_cache(struct page *page)
+{
+#ifndef NO_MM
+ swp_entry_t entry;
+
+ if (!PageLocked(page))
+ BUG();
+
+ if (unlikely(!block_flushpage(page, 0)))
+ BUG(); /* an anonymous page cannot have page->buffers set */
+
+ entry.val = page->index;
+
+ spin_lock(&pagecache_lock);
+ __delete_from_swap_cache(page);
+ spin_unlock(&pagecache_lock);
+
+ swap_free(entry);
+ page_cache_release(page);
+#else
+ BUG();
+#endif /* NO_MM */
+}
+
+/*
+ * Perform a free_page(), also freeing any swap cache associated with
+ * this page if it is the last user of the page. Can not do a lock_page,
+ * as we are holding the page_table_lock spinlock.
+ */
+void free_page_and_swap_cache(struct page *page)
+{
+ /*
+ * If we are the only user, then try to free up the swap cache.
+ *
+ * It's OK to check for PageSwapCache without the page lock
+ * here because we are going to recheck again inside
+ * exclusive_swap_page() _with_ the lock.
+ * - Marcelo
+ */
+ if (PageSwapCache(page) && !TryLockPage(page)) {
+#ifndef NO_MM
+ remove_exclusive_swap_page(page);
+#endif
+ UnlockPage(page);
+ }
+ page_cache_release(page);
+}
+
+/*
+ * Lookup a swap entry in the swap cache. A found page will be returned
+ * unlocked and with its refcount incremented - we rely on the kernel
+ * lock getting page table operations atomic even if we drop the page
+ * lock before returning.
+ */
+struct page * lookup_swap_cache(swp_entry_t entry)
+{
+#ifndef NO_MM
+ struct page *found;
+
+ found = find_get_page(&swapper_space, entry.val);
+ /*
+ * Unsafe to assert PageSwapCache and mapping on page found:
+ * if SMP nothing prevents swapoff from deleting this page from
+ * the swap cache at this moment. find_lock_page would prevent
+ * that, but no need to change: we _have_ got the right page.
+ */
+ INC_CACHE_INFO(find_total);
+ if (found)
+ INC_CACHE_INFO(find_success);
+ return found;
+#else
+ BUG();
+ return((struct page *) 0);
+#endif /* NO_MM */
+}
+
+/*
+ * Locate a page of swap in physical memory, reserving swap cache space
+ * and reading the disk if it is not already cached.
+ * A failure return means that either the page allocation failed or that
+ * the swap entry is no longer in use.
+ */
+
+struct page * read_swap_cache_async(swp_entry_t entry)
+{
+#ifndef NO_MM
+ struct page *found_page, *new_page = NULL;
+ int err;
+
+ do {
+ /*
+ * First check the swap cache. Since this is normally
+ * called after lookup_swap_cache() failed, re-calling
+ * that would confuse statistics: use find_get_page()
+ * directly.
+ */
+ found_page = find_get_page(&swapper_space, entry.val);
+ if (found_page)
+ break;
+
+ /*
+ * Get a new page to read into from swap.
+ */
+ if (!new_page) {
+ new_page = alloc_page(GFP_HIGHUSER);
+ if (!new_page)
+ break; /* Out of memory */
+ }
+
+ /*
+ * Associate the page with swap entry in the swap cache.
+ * May fail (-ENOENT) if swap entry has been freed since
+ * our caller observed it. May fail (-EEXIST) if there
+ * is already a page associated with this entry in the
+ * swap cache: added by a racing read_swap_cache_async,
+ * or by try_to_swap_out (or shmem_writepage) re-using
+ * the just freed swap entry for an existing page.
+ */
+ err = add_to_swap_cache(new_page, entry);
+ if (!err) {
+ /*
+ * Initiate read into locked page and return.
+ */
+ rw_swap_page(READ, new_page);
+ return new_page;
+ }
+ } while (err != -ENOENT);
+
+ if (new_page)
+ page_cache_release(new_page);
+ return found_page;
+#else
+ BUG();
+ return((struct page *) 0);
+#endif
+}
diff --git a/uClinux-2.4.20-uc1/mmnommu/swapfile.c b/uClinux-2.4.20-uc1/mmnommu/swapfile.c
new file mode 100644
index 0000000..2b8bf56
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/swapfile.c
@@ -0,0 +1,1316 @@
+/*
+ * linux/mm/swapfile.c
+ *
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ * Swap reorganised 29.12.95, Stephen Tweedie
+ * NO_MM
+ * Copyright (c) 2001 Lineo, Inc. David McCullough <davidm@lineo.com>
+ * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> ref uClinux 2.0
+ */
+
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel_stat.h>
+#include <linux/swap.h>
+#include <linux/swapctl.h>
+#include <linux/blkdev.h> /* for blk_size */
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/shm.h>
+
+#include <asm/pgtable.h>
+
+#ifndef NO_MM
+
+spinlock_t swaplock = SPIN_LOCK_UNLOCKED;
+unsigned int nr_swapfiles;
+int total_swap_pages;
+static int swap_overflow;
+
+static const char Bad_file[] = "Bad swap file entry ";
+static const char Unused_file[] = "Unused swap file entry ";
+static const char Bad_offset[] = "Bad swap offset entry ";
+static const char Unused_offset[] = "Unused swap offset entry ";
+
+struct swap_list_t swap_list = {-1, -1};
+
+struct swap_info_struct swap_info[MAX_SWAPFILES];
+
+#define SWAPFILE_CLUSTER 256
+
+static inline int scan_swap_map(struct swap_info_struct *si)
+{
+ unsigned long offset;
+ /*
+ * We try to cluster swap pages by allocating them
+ * sequentially in swap. Once we've allocated
+ * SWAPFILE_CLUSTER pages this way, however, we resort to
+ * first-free allocation, starting a new cluster. This
+ * prevents us from scattering swap pages all over the entire
+ * swap partition, so that we reduce overall disk seek times
+ * between swap pages. -- sct */
+ if (si->cluster_nr) {
+ while (si->cluster_next <= si->highest_bit) {
+ offset = si->cluster_next++;
+ if (si->swap_map[offset])
+ continue;
+ si->cluster_nr--;
+ goto got_page;
+ }
+ }
+ si->cluster_nr = SWAPFILE_CLUSTER;
+
+ /* try to find an empty (even not aligned) cluster. */
+ offset = si->lowest_bit;
+ check_next_cluster:
+ if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit)
+ {
+ int nr;
+ for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++)
+ if (si->swap_map[nr])
+ {
+ offset = nr+1;
+ goto check_next_cluster;
+ }
+ /* We found a completely empty cluster, so start
+ * using it.
+ */
+ goto got_page;
+ }
+ /* No luck, so now go fine-grained as usual. -Andrea */
+ for (offset = si->lowest_bit; offset <= si->highest_bit ; offset++) {
+ if (si->swap_map[offset])
+ continue;
+ si->lowest_bit = offset+1;
+ got_page:
+ if (offset == si->lowest_bit)
+ si->lowest_bit++;
+ if (offset == si->highest_bit)
+ si->highest_bit--;
+ if (si->lowest_bit > si->highest_bit) {
+ si->lowest_bit = si->max;
+ si->highest_bit = 0;
+ }
+ si->swap_map[offset] = 1;
+ nr_swap_pages--;
+ si->cluster_next = offset+1;
+ return offset;
+ }
+ si->lowest_bit = si->max;
+ si->highest_bit = 0;
+ return 0;
+}
+
+swp_entry_t get_swap_page(void)
+{
+ struct swap_info_struct * p;
+ unsigned long offset;
+ swp_entry_t entry;
+ int type, wrapped = 0;
+
+ entry.val = 0; /* Out of memory */
+ swap_list_lock();
+ type = swap_list.next;
+ if (type < 0)
+ goto out;
+ if (nr_swap_pages <= 0)
+ goto out;
+
+ while (1) {
+ p = &swap_info[type];
+ if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
+ swap_device_lock(p);
+ offset = scan_swap_map(p);
+ swap_device_unlock(p);
+ if (offset) {
+ entry = SWP_ENTRY(type,offset);
+ type = swap_info[type].next;
+ if (type < 0 ||
+ p->prio != swap_info[type].prio) {
+ swap_list.next = swap_list.head;
+ } else {
+ swap_list.next = type;
+ }
+ goto out;
+ }
+ }
+ type = p->next;
+ if (!wrapped) {
+ if (type < 0 || p->prio != swap_info[type].prio) {
+ type = swap_list.head;
+ wrapped = 1;
+ }
+ } else
+ if (type < 0)
+ goto out; /* out of swap space */
+ }
+out:
+ swap_list_unlock();
+ return entry;
+}
+
+static struct swap_info_struct * swap_info_get(swp_entry_t entry)
+{
+ struct swap_info_struct * p;
+ unsigned long offset, type;
+
+ if (!entry.val)
+ goto out;
+ type = SWP_TYPE(entry);
+ if (type >= nr_swapfiles)
+ goto bad_nofile;
+ p = & swap_info[type];
+ if (!(p->flags & SWP_USED))
+ goto bad_device;
+ offset = SWP_OFFSET(entry);
+ if (offset >= p->max)
+ goto bad_offset;
+ if (!p->swap_map[offset])
+ goto bad_free;
+ swap_list_lock();
+ if (p->prio > swap_info[swap_list.next].prio)
+ swap_list.next = type;
+ swap_device_lock(p);
+ return p;
+
+bad_free:
+ printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
+ goto out;
+bad_offset:
+ printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
+ goto out;
+bad_device:
+ printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
+ goto out;
+bad_nofile:
+ printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
+out:
+ return NULL;
+}
+
+static void swap_info_put(struct swap_info_struct * p)
+{
+ swap_device_unlock(p);
+ swap_list_unlock();
+}
+
+static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
+{
+ int count = p->swap_map[offset];
+
+ if (count < SWAP_MAP_MAX) {
+ count--;
+ p->swap_map[offset] = count;
+ if (!count) {
+ if (offset < p->lowest_bit)
+ p->lowest_bit = offset;
+ if (offset > p->highest_bit)
+ p->highest_bit = offset;
+ nr_swap_pages++;
+ }
+ }
+ return count;
+}
+
+/*
+ * Caller has made sure that the swapdevice corresponding to entry
+ * is still around or has not been recycled.
+ */
+void swap_free(swp_entry_t entry)
+{
+ struct swap_info_struct * p;
+
+ p = swap_info_get(entry);
+ if (p) {
+ swap_entry_free(p, SWP_OFFSET(entry));
+ swap_info_put(p);
+ }
+}
+
+/*
+ * Check if we're the only user of a swap page,
+ * when the page is locked.
+ */
+static int exclusive_swap_page(struct page *page)
+{
+ int retval = 0;
+ struct swap_info_struct * p;
+ swp_entry_t entry;
+
+ entry.val = page->index;
+ p = swap_info_get(entry);
+ if (p) {
+ /* Is the only swap cache user the cache itself? */
+ if (p->swap_map[SWP_OFFSET(entry)] == 1) {
+ /* Recheck the page count with the pagecache lock held.. */
+ spin_lock(&pagecache_lock);
+ if (page_count(page) - !!page->buffers == 2)
+ retval = 1;
+ spin_unlock(&pagecache_lock);
+ }
+ swap_info_put(p);
+ }
+ return retval;
+}
+
+/*
+ * We can use this swap cache entry directly
+ * if there are no other references to it.
+ *
+ * Here "exclusive_swap_page()" does the real
+ * work, but we opportunistically check whether
+ * we need to get all the locks first..
+ */
+int can_share_swap_page(struct page *page)
+{
+ int retval = 0;
+
+ if (!PageLocked(page))
+ BUG();
+ switch (page_count(page)) {
+ case 3:
+ if (!page->buffers)
+ break;
+ /* Fallthrough */
+ case 2:
+ if (!PageSwapCache(page))
+ break;
+ retval = exclusive_swap_page(page);
+ break;
+ case 1:
+ if (PageReserved(page))
+ break;
+ retval = 1;
+ }
+ return retval;
+}
+
+/*
+ * Work out if there are any other processes sharing this
+ * swap cache page. Free it if you can. Return success.
+ */
+int remove_exclusive_swap_page(struct page *page)
+{
+ int retval;
+ struct swap_info_struct * p;
+ swp_entry_t entry;
+
+ if (!PageLocked(page))
+ BUG();
+ if (!PageSwapCache(page))
+ return 0;
+ if (page_count(page) - !!page->buffers != 2) /* 2: us + cache */
+ return 0;
+
+ entry.val = page->index;
+ p = swap_info_get(entry);
+ if (!p)
+ return 0;
+
+ /* Is the only swap cache user the cache itself? */
+ retval = 0;
+ if (p->swap_map[SWP_OFFSET(entry)] == 1) {
+ /* Recheck the page count with the pagecache lock held.. */
+ spin_lock(&pagecache_lock);
+ if (page_count(page) - !!page->buffers == 2) {
+ __delete_from_swap_cache(page);
+ SetPageDirty(page);
+ retval = 1;
+ }
+ spin_unlock(&pagecache_lock);
+ }
+ swap_info_put(p);
+
+ if (retval) {
+ block_flushpage(page, 0);
+ swap_free(entry);
+ page_cache_release(page);
+ }
+
+ return retval;
+}
+
+/*
+ * Free the swap entry like above, but also try to
+ * free the page cache entry if it is the last user.
+ */
+void free_swap_and_cache(swp_entry_t entry)
+{
+ struct swap_info_struct * p;
+ struct page *page = NULL;
+
+ p = swap_info_get(entry);
+ if (p) {
+ if (swap_entry_free(p, SWP_OFFSET(entry)) == 1)
+ page = find_trylock_page(&swapper_space, entry.val);
+ swap_info_put(p);
+ }
+ if (page) {
+ page_cache_get(page);
+ /* Only cache user (+us), or swap space full? Free it! */
+ if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
+ delete_from_swap_cache(page);
+ SetPageDirty(page);
+ }
+ UnlockPage(page);
+ page_cache_release(page);
+ }
+}
+
+/*
+ * The swap entry has been read in advance, and we return 1 to indicate
+ * that the page has been used or is no longer needed.
+ *
+ * Always set the resulting pte to be nowrite (the same as COW pages
+ * after one process has exited). We don't know just how many PTEs will
+ * share this swap entry, so be cautious and let do_wp_page work out
+ * what to do if a write is requested later.
+ */
+/* mmlist_lock and vma->vm_mm->page_table_lock are held */
+static inline void unuse_pte(struct vm_area_struct * vma, unsigned long address,
+ pte_t *dir, swp_entry_t entry, struct page* page)
+{
+ pte_t pte = *dir;
+
+ if (likely(pte_to_swp_entry(pte).val != entry.val))
+ return;
+ if (unlikely(pte_none(pte) || pte_present(pte)))
+ return;
+ get_page(page);
+ set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot)));
+ swap_free(entry);
+ ++vma->vm_mm->rss;
+}
+
+/* mmlist_lock and vma->vm_mm->page_table_lock are held */
+static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
+ unsigned long address, unsigned long size, unsigned long offset,
+ swp_entry_t entry, struct page* page)
+{
+ pte_t * pte;
+ unsigned long end;
+
+ if (pmd_none(*dir))
+ return;
+ if (pmd_bad(*dir)) {
+ pmd_ERROR(*dir);
+ pmd_clear(dir);
+ return;
+ }
+ pte = pte_offset(dir, address);
+ offset += address & PMD_MASK;
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ unuse_pte(vma, offset+address-vma->vm_start, pte, entry, page);
+ address += PAGE_SIZE;
+ pte++;
+ } while (address && (address < end));
+}
+
+/* mmlist_lock and vma->vm_mm->page_table_lock are held */
+static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
+ unsigned long address, unsigned long size,
+ swp_entry_t entry, struct page* page)
+{
+ pmd_t * pmd;
+ unsigned long offset, end;
+
+ if (pgd_none(*dir))
+ return;
+ if (pgd_bad(*dir)) {
+ pgd_ERROR(*dir);
+ pgd_clear(dir);
+ return;
+ }
+ pmd = pmd_offset(dir, address);
+ offset = address & PGDIR_MASK;
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ if (address >= end)
+ BUG();
+ do {
+ unuse_pmd(vma, pmd, address, end - address, offset, entry,
+ page);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+}
+
+/* mmlist_lock and vma->vm_mm->page_table_lock are held */
+static void unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
+ swp_entry_t entry, struct page* page)
+{
+ unsigned long start = vma->vm_start, end = vma->vm_end;
+
+ if (start >= end)
+ BUG();
+ do {
+ unuse_pgd(vma, pgdir, start, end - start, entry, page);
+ start = (start + PGDIR_SIZE) & PGDIR_MASK;
+ pgdir++;
+ } while (start && (start < end));
+}
+
+static void unuse_process(struct mm_struct * mm,
+ swp_entry_t entry, struct page* page)
+{
+ struct vm_area_struct* vma;
+
+ /*
+ * Go through process' page directory.
+ */
+ spin_lock(&mm->page_table_lock);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ pgd_t * pgd = pgd_offset(mm, vma->vm_start);
+ unuse_vma(vma, pgd, entry, page);
+ }
+ spin_unlock(&mm->page_table_lock);
+ return;
+}
+
+/*
+ * Scan swap_map from current position to next entry still in use.
+ * Recycle to start on reaching the end, returning 0 when empty.
+ */
+static int find_next_to_unuse(struct swap_info_struct *si, int prev)
+{
+ int max = si->max;
+ int i = prev;
+ int count;
+
+ /*
+ * No need for swap_device_lock(si) here: we're just looking
+ * for whether an entry is in use, not modifying it; false
+ * hits are okay, and sys_swapoff() has already prevented new
+ * allocations from this area (while holding swap_list_lock()).
+ */
+ for (;;) {
+ if (++i >= max) {
+ if (!prev) {
+ i = 0;
+ break;
+ }
+ /*
+ * No entries in use at top of swap_map,
+ * loop back to start and recheck there.
+ */
+ max = prev + 1;
+ prev = 0;
+ i = 1;
+ }
+ count = si->swap_map[i];
+ if (count && count != SWAP_MAP_BAD)
+ break;
+ }
+ return i;
+}
+
+/*
+ * We completely avoid races by reading each swap page in advance,
+ * and then search for the process using it. All the necessary
+ * page table adjustments can then be made atomically.
+ */
+static int try_to_unuse(unsigned int type)
+{
+ struct swap_info_struct * si = &swap_info[type];
+ struct mm_struct *start_mm;
+ unsigned short *swap_map;
+ unsigned short swcount;
+ struct page *page;
+ swp_entry_t entry;
+ int i = 0;
+ int retval = 0;
+ int reset_overflow = 0;
+
+ /*
+ * When searching mms for an entry, a good strategy is to
+ * start at the first mm we freed the previous entry from
+ * (though actually we don't notice whether we or coincidence
+ * freed the entry). Initialize this start_mm with a hold.
+ *
+ * A simpler strategy would be to start at the last mm we
+ * freed the previous entry from; but that would take less
+ * advantage of mmlist ordering (now preserved by swap_out()),
+ * which clusters forked address spaces together, most recent
+ * child immediately after parent. If we race with dup_mmap(),
+ * we very much want to resolve parent before child, otherwise
+ * we may miss some entries: using last mm would invert that.
+ */
+ start_mm = &init_mm;
+ atomic_inc(&init_mm.mm_users);
+
+ /*
+ * Keep on scanning until all entries have gone. Usually,
+ * one pass through swap_map is enough, but not necessarily:
+ * mmput() removes mm from mmlist before exit_mmap() and its
+ * zap_page_range(). That's not too bad, those entries are
+ * on their way out, and handled faster there than here.
+ * do_munmap() behaves similarly, taking the range out of mm's
+ * vma list before zap_page_range(). But unfortunately, when
+ * unmapping a part of a vma, it takes the whole out first,
+ * then reinserts what's left after (might even reschedule if
+ * open() method called) - so swap entries may be invisible
+ * to swapoff for a while, then reappear - but that is rare.
+ */
+ while ((i = find_next_to_unuse(si, i))) {
+ /*
+ * Get a page for the entry, using the existing swap
+ * cache page if there is one. Otherwise, get a clean
+ * page and read the swap into it.
+ */
+ swap_map = &si->swap_map[i];
+ entry = SWP_ENTRY(type, i);
+ page = read_swap_cache_async(entry);
+ if (!page) {
+ /*
+ * Either swap_duplicate() failed because entry
+ * has been freed independently, and will not be
+ * reused since sys_swapoff() already disabled
+ * allocation from here, or alloc_page() failed.
+ */
+ if (!*swap_map)
+ continue;
+ retval = -ENOMEM;
+ break;
+ }
+
+ /*
+ * Don't hold on to start_mm if it looks like exiting.
+ */
+ if (atomic_read(&start_mm->mm_users) == 1) {
+ mmput(start_mm);
+ start_mm = &init_mm;
+ atomic_inc(&init_mm.mm_users);
+ }
+
+ /*
+ * Wait for and lock page. When do_swap_page races with
+ * try_to_unuse, do_swap_page can handle the fault much
+ * faster than try_to_unuse can locate the entry. This
+ * apparently redundant "wait_on_page" lets try_to_unuse
+ * defer to do_swap_page in such a case - in some tests,
+ * do_swap_page and try_to_unuse repeatedly compete.
+ */
+ wait_on_page(page);
+ lock_page(page);
+
+ /*
+ * Remove all references to entry, without blocking.
+ * Whenever we reach init_mm, there's no address space
+ * to search, but use it as a reminder to search shmem.
+ */
+ swcount = *swap_map;
+ if (swcount > 1) {
+ flush_page_to_ram(page);
+ if (start_mm == &init_mm)
+ shmem_unuse(entry, page);
+ else
+ unuse_process(start_mm, entry, page);
+ }
+ if (*swap_map > 1) {
+ int set_start_mm = (*swap_map >= swcount);
+ struct list_head *p = &start_mm->mmlist;
+ struct mm_struct *new_start_mm = start_mm;
+ struct mm_struct *mm;
+
+ spin_lock(&mmlist_lock);
+ while (*swap_map > 1 &&
+ (p = p->next) != &start_mm->mmlist) {
+ mm = list_entry(p, struct mm_struct, mmlist);
+ swcount = *swap_map;
+ if (mm == &init_mm) {
+ set_start_mm = 1;
+ shmem_unuse(entry, page);
+ } else
+ unuse_process(mm, entry, page);
+ if (set_start_mm && *swap_map < swcount) {
+ new_start_mm = mm;
+ set_start_mm = 0;
+ }
+ }
+ atomic_inc(&new_start_mm->mm_users);
+ spin_unlock(&mmlist_lock);
+ mmput(start_mm);
+ start_mm = new_start_mm;
+ }
+
+ /*
+ * How could swap count reach 0x7fff when the maximum
+ * pid is 0x7fff, and there's no way to repeat a swap
+ * page within an mm (except in shmem, where it's the
+ * shared object which takes the reference count)?
+ * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
+ *
+ * If that's wrong, then we should worry more about
+ * exit_mmap() and do_munmap() cases described above:
+ * we might be resetting SWAP_MAP_MAX too early here.
+ * We know "Undead"s can happen, they're okay, so don't
+ * report them; but do report if we reset SWAP_MAP_MAX.
+ */
+ if (*swap_map == SWAP_MAP_MAX) {
+ swap_list_lock();
+ swap_device_lock(si);
+ nr_swap_pages++;
+ *swap_map = 1;
+ swap_device_unlock(si);
+ swap_list_unlock();
+ reset_overflow = 1;
+ }
+
+ /*
+ * If a reference remains (rare), we would like to leave
+ * the page in the swap cache; but try_to_swap_out could
+ * then re-duplicate the entry once we drop page lock,
+ * so we might loop indefinitely; also, that page could
+ * not be swapped out to other storage meanwhile. So:
+ * delete from cache even if there's another reference,
+ * after ensuring that the data has been saved to disk -
+ * since if the reference remains (rarer), it will be
+ * read from disk into another page. Splitting into two
+ * pages would be incorrect if swap supported "shared
+ * private" pages, but they are handled by tmpfs files.
+	 * Note shmem_unuse already deleted it from the swap cache.
+ */
+ if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
+ rw_swap_page(WRITE, page);
+ lock_page(page);
+ }
+ if (PageSwapCache(page))
+ delete_from_swap_cache(page);
+
+ /*
+	 * So that we could skip searching mms once the swap count
+	 * went to 1, we did not mark any present ptes as dirty: we
+	 * must mark the page dirty so try_to_swap_out will preserve it.
+ */
+ SetPageDirty(page);
+ UnlockPage(page);
+ page_cache_release(page);
+
+ /*
+ * Make sure that we aren't completely killing
+ * interactive performance. Interruptible check on
+ * signal_pending() would be nice, but changes the spec?
+ */
+ if (current->need_resched)
+ schedule();
+ }
+
+ mmput(start_mm);
+ if (reset_overflow) {
+ printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
+ swap_overflow = 0;
+ }
+ return retval;
+}
+#endif /* NO_MM */
+
+asmlinkage long sys_swapoff(const char * specialfile)
+{
+#ifndef NO_MM
+ struct swap_info_struct * p = NULL;
+ unsigned short *swap_map;
+ struct nameidata nd;
+ int i, type, prev;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = user_path_walk(specialfile, &nd);
+ if (err)
+ goto out;
+
+ lock_kernel();
+ prev = -1;
+ swap_list_lock();
+ for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
+ p = swap_info + type;
+ if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
+ if (p->swap_file == nd.dentry)
+ break;
+ }
+ prev = type;
+ }
+ err = -EINVAL;
+ if (type < 0) {
+ swap_list_unlock();
+ goto out_dput;
+ }
+
+ if (prev < 0) {
+ swap_list.head = p->next;
+ } else {
+ swap_info[prev].next = p->next;
+ }
+ if (type == swap_list.next) {
+ /* just pick something that's safe... */
+ swap_list.next = swap_list.head;
+ }
+ nr_swap_pages -= p->pages;
+ total_swap_pages -= p->pages;
+ p->flags = SWP_USED;
+ swap_list_unlock();
+ unlock_kernel();
+ err = try_to_unuse(type);
+ lock_kernel();
+ if (err) {
+ /* re-insert swap space back into swap_list */
+ swap_list_lock();
+ for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next)
+ if (p->prio >= swap_info[i].prio)
+ break;
+ p->next = i;
+ if (prev < 0)
+ swap_list.head = swap_list.next = p - swap_info;
+ else
+ swap_info[prev].next = p - swap_info;
+ nr_swap_pages += p->pages;
+ total_swap_pages += p->pages;
+ p->flags = SWP_WRITEOK;
+ swap_list_unlock();
+ goto out_dput;
+ }
+ if (p->swap_device)
+ blkdev_put(p->swap_file->d_inode->i_bdev, BDEV_SWAP);
+ path_release(&nd);
+
+ swap_list_lock();
+ swap_device_lock(p);
+ nd.mnt = p->swap_vfsmnt;
+ nd.dentry = p->swap_file;
+ p->swap_vfsmnt = NULL;
+ p->swap_file = NULL;
+ p->swap_device = 0;
+ p->max = 0;
+ swap_map = p->swap_map;
+ p->swap_map = NULL;
+ p->flags = 0;
+ swap_device_unlock(p);
+ swap_list_unlock();
+ vfree(swap_map);
+ err = 0;
+
+out_dput:
+ unlock_kernel();
+ path_release(&nd);
+out:
+ return err;
+#else
+ return(-ENOSYS);
+#endif /* NO_MM */
+}
+
+int get_swaparea_info(char *buf)
+{
+#ifndef NO_MM
+ char * page = (char *) __get_free_page(GFP_KERNEL);
+ struct swap_info_struct *ptr = swap_info;
+ int i, j, len = 0, usedswap;
+
+ if (!page)
+ return -ENOMEM;
+
+ len += sprintf(buf, "Filename\t\t\tType\t\tSize\tUsed\tPriority\n");
+ for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
+ if ((ptr->flags & SWP_USED) && ptr->swap_map) {
+ char * path = d_path(ptr->swap_file, ptr->swap_vfsmnt,
+ page, PAGE_SIZE);
+
+ len += sprintf(buf + len, "%-31s ", path);
+
+ if (!ptr->swap_device)
+ len += sprintf(buf + len, "file\t\t");
+ else
+ len += sprintf(buf + len, "partition\t");
+
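+			/* Sizes below are reported in kB: pages << (PAGE_SHIFT - 10),
+			 * e.g. a shift of 2 when PAGE_SIZE is 4 kB. */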
+ usedswap = 0;
+ for (j = 0; j < ptr->max; ++j)
+ switch (ptr->swap_map[j]) {
+ case SWAP_MAP_BAD:
+ case 0:
+ continue;
+ default:
+ usedswap++;
+ }
+ len += sprintf(buf + len, "%d\t%d\t%d\n", ptr->pages << (PAGE_SHIFT - 10),
+ usedswap << (PAGE_SHIFT - 10), ptr->prio);
+ }
+ }
+ free_page((unsigned long) page);
+ return len;
+#else
+ return sprintf(buf, "No swap");
+#endif /* NO_MM */
+}
+
+int is_swap_partition(kdev_t dev) {
+#ifndef NO_MM
+ struct swap_info_struct *ptr = swap_info;
+ int i;
+
+ for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
+ if (ptr->flags & SWP_USED)
+ if (ptr->swap_device == dev)
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/*
+ * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
+ *
+ * The swapon system call
+ */
+asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
+{
+#ifndef NO_MM
+ struct swap_info_struct * p;
+ struct nameidata nd;
+ struct inode * swap_inode;
+ unsigned int type;
+ int i, j, prev;
+ int error;
+ static int least_priority = 0;
+ union swap_header *swap_header = 0;
+ int swap_header_version;
+ int nr_good_pages = 0;
+ unsigned long maxpages = 1;
+ int swapfilesize;
+ struct block_device *bdev = NULL;
+ unsigned short *swap_map;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ lock_kernel();
+ swap_list_lock();
+ p = swap_info;
+ for (type = 0 ; type < nr_swapfiles ; type++,p++)
+ if (!(p->flags & SWP_USED))
+ break;
+ error = -EPERM;
+ if (type >= MAX_SWAPFILES) {
+ swap_list_unlock();
+ goto out;
+ }
+ if (type >= nr_swapfiles)
+ nr_swapfiles = type+1;
+ p->flags = SWP_USED;
+ p->swap_file = NULL;
+ p->swap_vfsmnt = NULL;
+ p->swap_device = 0;
+ p->swap_map = NULL;
+ p->lowest_bit = 0;
+ p->highest_bit = 0;
+ p->cluster_nr = 0;
+ p->sdev_lock = SPIN_LOCK_UNLOCKED;
+ p->next = -1;
+ if (swap_flags & SWAP_FLAG_PREFER) {
+ p->prio =
+ (swap_flags & SWAP_FLAG_PRIO_MASK)>>SWAP_FLAG_PRIO_SHIFT;
+ } else {
+ p->prio = --least_priority;
+ }
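+	/*
+	 * e.g. swapon() called with SWAP_FLAG_PREFER and a priority of 5
+	 * encoded in the flag bits gives p->prio == 5; without
+	 * SWAP_FLAG_PREFER successive areas get -1, -2, ..., so areas
+	 * enabled earlier are preferred over later ones.
+	 */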
+ swap_list_unlock();
+ error = user_path_walk(specialfile, &nd);
+ if (error)
+ goto bad_swap_2;
+
+ p->swap_file = nd.dentry;
+ p->swap_vfsmnt = nd.mnt;
+ swap_inode = nd.dentry->d_inode;
+ error = -EINVAL;
+
+ if (S_ISBLK(swap_inode->i_mode)) {
+ kdev_t dev = swap_inode->i_rdev;
+ struct block_device_operations *bdops;
+ devfs_handle_t de;
+
+ p->swap_device = dev;
+ set_blocksize(dev, PAGE_SIZE);
+
+ bd_acquire(swap_inode);
+ bdev = swap_inode->i_bdev;
+ de = devfs_get_handle_from_inode(swap_inode);
+ bdops = devfs_get_ops(de); /* Increments module use count */
+ if (bdops) bdev->bd_op = bdops;
+
+ error = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_SWAP);
+		devfs_put_ops(de);	/* Decrement module use count now we're safe */
+ if (error)
+ goto bad_swap_2;
+ set_blocksize(dev, PAGE_SIZE);
+ error = -ENODEV;
+ if (!dev || (blk_size[MAJOR(dev)] &&
+ !blk_size[MAJOR(dev)][MINOR(dev)]))
+ goto bad_swap;
+ swapfilesize = 0;
+ if (blk_size[MAJOR(dev)])
+ swapfilesize = blk_size[MAJOR(dev)][MINOR(dev)]
+ >> (PAGE_SHIFT - 10);
+ } else if (S_ISREG(swap_inode->i_mode))
+ swapfilesize = swap_inode->i_size >> PAGE_SHIFT;
+ else
+ goto bad_swap;
+
+ error = -EBUSY;
+ for (i = 0 ; i < nr_swapfiles ; i++) {
+ struct swap_info_struct *q = &swap_info[i];
+ if (i == type || !q->swap_file)
+ continue;
+ if (swap_inode->i_mapping == q->swap_file->d_inode->i_mapping)
+ goto bad_swap;
+ }
+
+ swap_header = (void *) __get_free_page(GFP_USER);
+ if (!swap_header) {
+ printk("Unable to start swapping: out of memory :-)\n");
+ error = -ENOMEM;
+ goto bad_swap;
+ }
+
+ lock_page(virt_to_page(swap_header));
+ rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header);
+
+ if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
+ swap_header_version = 1;
+ else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10))
+ swap_header_version = 2;
+ else {
+ printk("Unable to find swap-space signature\n");
+ error = -EINVAL;
+ goto bad_swap;
+ }
+
+ switch (swap_header_version) {
+ case 1:
+ memset(((char *) swap_header)+PAGE_SIZE-10,0,10);
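+		/*
+		 * The version-1 header is a bitmap: bit i set means page i of
+		 * the device is usable.  The last 10 bytes hold the "SWAP-SPACE"
+		 * signature, cleared above so those bits are not mistaken for
+		 * good pages; at most 8*PAGE_SIZE - 1 pages can be described
+		 * (roughly 128 MB with 4 kB pages).
+		 */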
+ j = 0;
+ p->lowest_bit = 0;
+ p->highest_bit = 0;
+ for (i = 1 ; i < 8*PAGE_SIZE ; i++) {
+ if (test_bit(i,(char *) swap_header)) {
+ if (!p->lowest_bit)
+ p->lowest_bit = i;
+ p->highest_bit = i;
+ maxpages = i+1;
+ j++;
+ }
+ }
+ nr_good_pages = j;
+ p->swap_map = vmalloc(maxpages * sizeof(short));
+ if (!p->swap_map) {
+ error = -ENOMEM;
+ goto bad_swap;
+ }
+ for (i = 1 ; i < maxpages ; i++) {
+ if (test_bit(i,(char *) swap_header))
+ p->swap_map[i] = 0;
+ else
+ p->swap_map[i] = SWAP_MAP_BAD;
+ }
+ break;
+
+ case 2:
+ /* Check the swap header's sub-version and the size of
+ the swap file and bad block lists */
+ if (swap_header->info.version != 1) {
+ printk(KERN_WARNING
+ "Unable to handle swap header version %d\n",
+ swap_header->info.version);
+ error = -EINVAL;
+ goto bad_swap;
+ }
+
+ p->lowest_bit = 1;
+ maxpages = SWP_OFFSET(SWP_ENTRY(0,~0UL)) - 1;
+ if (maxpages > swap_header->info.last_page)
+ maxpages = swap_header->info.last_page;
+ p->highest_bit = maxpages - 1;
+
+ error = -EINVAL;
+ if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
+ goto bad_swap;
+
+ /* OK, set up the swap map and apply the bad block list */
+ if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
+ error = -ENOMEM;
+ goto bad_swap;
+ }
+
+ error = 0;
+ memset(p->swap_map, 0, maxpages * sizeof(short));
+ for (i=0; i<swap_header->info.nr_badpages; i++) {
+ int page = swap_header->info.badpages[i];
+ if (page <= 0 || page >= swap_header->info.last_page)
+ error = -EINVAL;
+ else
+ p->swap_map[page] = SWAP_MAP_BAD;
+ }
+ nr_good_pages = swap_header->info.last_page -
+ swap_header->info.nr_badpages -
+ 1 /* header page */;
+ if (error)
+ goto bad_swap;
+ }
+
+ if (swapfilesize && maxpages > swapfilesize) {
+ printk(KERN_WARNING
+ "Swap area shorter than signature indicates\n");
+ error = -EINVAL;
+ goto bad_swap;
+ }
+ if (!nr_good_pages) {
+ printk(KERN_WARNING "Empty swap-file\n");
+ error = -EINVAL;
+ goto bad_swap;
+ }
+ p->swap_map[0] = SWAP_MAP_BAD;
+ swap_list_lock();
+ swap_device_lock(p);
+ p->max = maxpages;
+ p->flags = SWP_WRITEOK;
+ p->pages = nr_good_pages;
+ nr_swap_pages += nr_good_pages;
+ total_swap_pages += nr_good_pages;
+ printk(KERN_INFO "Adding Swap: %dk swap-space (priority %d)\n",
+ nr_good_pages<<(PAGE_SHIFT-10), p->prio);
+
+ /* insert swap space into swap_list: */
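+	/* The list stays sorted by descending priority; the ">=" test
+	 * puts a new area ahead of existing areas of equal priority. */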
+ prev = -1;
+ for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
+ if (p->prio >= swap_info[i].prio) {
+ break;
+ }
+ prev = i;
+ }
+ p->next = i;
+ if (prev < 0) {
+ swap_list.head = swap_list.next = p - swap_info;
+ } else {
+ swap_info[prev].next = p - swap_info;
+ }
+ swap_device_unlock(p);
+ swap_list_unlock();
+ error = 0;
+ goto out;
+bad_swap:
+ if (bdev)
+ blkdev_put(bdev, BDEV_SWAP);
+bad_swap_2:
+ swap_list_lock();
+ swap_map = p->swap_map;
+ nd.mnt = p->swap_vfsmnt;
+ nd.dentry = p->swap_file;
+ p->swap_device = 0;
+ p->swap_file = NULL;
+ p->swap_vfsmnt = NULL;
+ p->swap_map = NULL;
+ p->flags = 0;
+ if (!(swap_flags & SWAP_FLAG_PREFER))
+ ++least_priority;
+ swap_list_unlock();
+ if (swap_map)
+ vfree(swap_map);
+ path_release(&nd);
+out:
+ if (swap_header)
+ free_page((long) swap_header);
+ unlock_kernel();
+ return error;
+#else
+ return(-ENOSYS);
+#endif /* NO_MM */
+}
+
+void si_swapinfo(struct sysinfo *val)
+{
+#ifndef NO_MM
+ unsigned int i;
+ unsigned long nr_to_be_unused = 0;
+
+ swap_list_lock();
+ for (i = 0; i < nr_swapfiles; i++) {
+ unsigned int j;
+ if (swap_info[i].flags != SWP_USED)
+ continue;
+ for (j = 0; j < swap_info[i].max; ++j) {
+ switch (swap_info[i].swap_map[j]) {
+ case 0:
+ case SWAP_MAP_BAD:
+ continue;
+ default:
+ nr_to_be_unused++;
+ }
+ }
+ }
+ val->freeswap = nr_swap_pages + nr_to_be_unused;
+ val->totalswap = total_swap_pages + nr_to_be_unused;
+ swap_list_unlock();
+#else
+ val->freeswap = val->totalswap = 0;
+#endif /* NO_MM */
+}
+
+#ifndef NO_MM
+/*
+ * Verify that a swap entry is valid and increment its swap map count.
+ *
+ * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
+ * "permanent", but will be reclaimed by the next swapoff.
+ */
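+/*
+ * A swp_entry_t packs the swap area index and page offset into one
+ * word (built by SWP_ENTRY(), taken apart by SWP_TYPE() and
+ * SWP_OFFSET()); the exact bit layout is architecture-specific.
+ * With a 16-bit swap_map and SWAP_MAP_BAD occupying the top bit,
+ * SWAP_MAP_MAX is 0x7fff, which is what the overflow check below
+ * clamps to.
+ */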
+int swap_duplicate(swp_entry_t entry)
+{
+ struct swap_info_struct * p;
+ unsigned long offset, type;
+ int result = 0;
+
+ type = SWP_TYPE(entry);
+ if (type >= nr_swapfiles)
+ goto bad_file;
+ p = type + swap_info;
+ offset = SWP_OFFSET(entry);
+
+ swap_device_lock(p);
+ if (offset < p->max && p->swap_map[offset]) {
+ if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
+ p->swap_map[offset]++;
+ result = 1;
+ } else if (p->swap_map[offset] <= SWAP_MAP_MAX) {
+ if (swap_overflow++ < 5)
+ printk(KERN_WARNING "swap_dup: swap entry overflow\n");
+ p->swap_map[offset] = SWAP_MAP_MAX;
+ result = 1;
+ }
+ }
+ swap_device_unlock(p);
+out:
+ return result;
+
+bad_file:
+ printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
+ goto out;
+}
+
+/*
+ * Page lock needs to be held in all cases to prevent races with
+ * swap file deletion.
+ */
+int swap_count(struct page *page)
+{
+ struct swap_info_struct * p;
+ unsigned long offset, type;
+ swp_entry_t entry;
+ int retval = 0;
+
+ entry.val = page->index;
+ if (!entry.val)
+ goto bad_entry;
+ type = SWP_TYPE(entry);
+ if (type >= nr_swapfiles)
+ goto bad_file;
+ p = type + swap_info;
+ offset = SWP_OFFSET(entry);
+ if (offset >= p->max)
+ goto bad_offset;
+ if (!p->swap_map[offset])
+ goto bad_unused;
+ retval = p->swap_map[offset];
+out:
+ return retval;
+
+bad_entry:
+ printk(KERN_ERR "swap_count: null entry!\n");
+ goto out;
+bad_file:
+ printk(KERN_ERR "swap_count: %s%08lx\n", Bad_file, entry.val);
+ goto out;
+bad_offset:
+ printk(KERN_ERR "swap_count: %s%08lx\n", Bad_offset, entry.val);
+ goto out;
+bad_unused:
+ printk(KERN_ERR "swap_count: %s%08lx\n", Unused_offset, entry.val);
+ goto out;
+}
+
+/*
+ * Prior swap_duplicate protects against swap device deletion.
+ */
+void get_swaphandle_info(swp_entry_t entry, unsigned long *offset,
+ kdev_t *dev, struct inode **swapf)
+{
+ unsigned long type;
+ struct swap_info_struct *p;
+
+ type = SWP_TYPE(entry);
+ if (type >= nr_swapfiles) {
+ printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_file, entry.val);
+ return;
+ }
+
+ p = &swap_info[type];
+ *offset = SWP_OFFSET(entry);
+ if (*offset >= p->max && *offset != 0) {
+ printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_offset, entry.val);
+ return;
+ }
+ if (p->swap_map && !p->swap_map[*offset]) {
+ printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_offset, entry.val);
+ return;
+ }
+ if (!(p->flags & SWP_USED)) {
+ printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_file, entry.val);
+ return;
+ }
+
+ if (p->swap_device) {
+ *dev = p->swap_device;
+ } else if (p->swap_file) {
+ *swapf = p->swap_file->d_inode;
+ } else {
+ printk(KERN_ERR "rw_swap_page: no swap file or device\n");
+ }
+ return;
+}
+
+/*
+ * swap_device_lock prevents swap_map being freed. Don't grab an extra
+ * reference on the swaphandle, it doesn't matter if it becomes unused.
+ */
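+/*
+ * e.g. with page_cluster == 3 the readahead window is 8 pages: for
+ * an entry at offset 13, toff becomes (13 >> 3) << 3 == 8 and up to
+ * 8 consecutive in-use offsets starting there are counted; a window
+ * that would start at offset 0 skips the header page instead.
+ */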
+int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
+{
+ int ret = 0, i = 1 << page_cluster;
+ unsigned long toff;
+ struct swap_info_struct *swapdev = SWP_TYPE(entry) + swap_info;
+
+ if (!page_cluster) /* no readahead */
+ return 0;
+ toff = (SWP_OFFSET(entry) >> page_cluster) << page_cluster;
+ if (!toff) /* first page is swap header */
+ toff++, i--;
+ *offset = toff;
+
+ swap_device_lock(swapdev);
+ do {
+ /* Don't read-ahead past the end of the swap area */
+ if (toff >= swapdev->max)
+ break;
+ /* Don't read in free or bad pages */
+ if (!swapdev->swap_map[toff])
+ break;
+ if (swapdev->swap_map[toff] == SWAP_MAP_BAD)
+ break;
+ toff++;
+ ret++;
+ } while (--i);
+ swap_device_unlock(swapdev);
+ return ret;
+}
+
+#endif /* NO_MM */
diff --git a/uClinux-2.4.20-uc1/mmnommu/vmalloc.c b/uClinux-2.4.20-uc1/mmnommu/vmalloc.c
new file mode 100644
index 0000000..47595aa
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/vmalloc.c
@@ -0,0 +1,51 @@
+/*
+ * linux/mm/vmalloc.c
+ *
+ * Copyright (C) 1993 Linus Torvalds
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
+ * Copyright (c) 2001 Lineo Inc., David McCullough <davidm@lineo.com>
+ * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
+ */
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
+struct vm_struct * vmlist;
+
+void vfree(void * addr)
+{
+ kfree(addr);
+}
+
+void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
+{
+ /*
+ * kmalloc doesn't like __GFP_HIGHMEM for some reason
+ * I doubt we need it - DAVIDM
+ */
+ return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
+}
+
+long vread(char *buf, char *addr, unsigned long count)
+{
+ memcpy(buf, addr, count);
+ return count;
+}
+
+long vwrite(char *buf, char *addr, unsigned long count)
+{
+ /* Don't allow overflow */
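+	/* e.g. on a 32-bit machine, addr == 0xfffff000 with count == 0x2000
+	 * would wrap, so count is clamped to -(unsigned long)addr == 0x1000 */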
+ if ((unsigned long) addr + count < count)
+ count = -(unsigned long) addr;
+
+ memcpy(addr, buf, count);
+ return(count);
+}
+
diff --git a/uClinux-2.4.20-uc1/mmnommu/vmscan.c b/uClinux-2.4.20-uc1/mmnommu/vmscan.c
new file mode 100644
index 0000000..1c8d779
--- /dev/null
+++ b/uClinux-2.4.20-uc1/mmnommu/vmscan.c
@@ -0,0 +1,799 @@
+/*
+ * linux/mm/vmscan.c
+ *
+ * The pageout daemon decides which pages to evict (swap out) and
+ * does the actual work of freeing them.
+ *
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * NO_MM
+ * Copyright (c) 2001 Lineo, Inc. David McCullough <davidm@lineo.com>
+ * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> ref uClinux 2.0
+ *
+ * Swap reorganised 29.12.95, Stephen Tweedie.
+ * kswapd added: 7.1.96 sct
+ * Removed kswapd_ctl limits, and swap out as many pages as needed
+ * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
+ * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
+ * Multiqueue VM started 5.8.00, Rik van Riel.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel_stat.h>
+#include <linux/swap.h>
+#include <linux/swapctl.h>
+#include <linux/smp_lock.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/file.h>
+#include <linux/config.h>
+
+#include <asm/pgalloc.h>
+
+/*
+ * The "priority" of VM scanning is how much of the queues we
+ * will scan in one go.  At DEF_PRIORITY (6), shrink_cache() scans
+ * up to 1/6th of the inactive list ("nr_inactive_pages / priority")
+ * per pass; lower priorities scan progressively larger fractions.
+ */
+#define DEF_PRIORITY (6)
+
+#ifndef NO_MM
+/*
+ * The swap-out function returns 1 if it successfully
+ * scanned all the pages it was asked to (`count').
+ * It returns zero if it couldn't do anything,
+ *
+ * rss may decrease because pages are shared, but this
+ * doesn't count as having freed a page.
+ */
+
+/* mm->page_table_lock is held. mmap_sem is not held */
+static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, struct page *page, zone_t * classzone)
+{
+ pte_t pte;
+ swp_entry_t entry;
+
+ /* Don't look at this pte if it's been accessed recently. */
+ if ((vma->vm_flags & VM_LOCKED) || ptep_test_and_clear_young(page_table)) {
+ mark_page_accessed(page);
+ return 0;
+ }
+
+ /* Don't bother unmapping pages that are active */
+ if (PageActive(page))
+ return 0;
+
+ /* Don't bother replenishing zones not under pressure.. */
+ if (!memclass(page_zone(page), classzone))
+ return 0;
+
+ if (TryLockPage(page))
+ return 0;
+
+ /* From this point on, the odds are that we're going to
+ * nuke this pte, so read and clear the pte. This hook
+ * is needed on CPUs which update the accessed and dirty
+ * bits in hardware.
+ */
+ flush_cache_page(vma, address);
+ pte = ptep_get_and_clear(page_table);
+ flush_tlb_page(vma, address);
+
+ if (pte_dirty(pte))
+ set_page_dirty(page);
+
+ /*
+ * Is the page already in the swap cache? If so, then
+ * we can just drop our reference to it without doing
+ * any IO - it's already up-to-date on disk.
+ */
+ if (PageSwapCache(page)) {
+ entry.val = page->index;
+ swap_duplicate(entry);
+set_swap_pte:
+ set_pte(page_table, swp_entry_to_pte(entry));
+drop_pte:
+ mm->rss--;
+#ifdef __arm__
+ memc_clear(vma->vm_mm, page);
+#endif
+ UnlockPage(page);
+ {
+ int freeable = page_count(page) - !!page->buffers <= 2;
+ page_cache_release(page);
+ return freeable;
+ }
+ }
+
+ /*
+ * Is it a clean page? Then it must be recoverable
+ * by just paging it in again, and we can just drop
+ * it.. or if it's dirty but has backing store,
+ * just mark the page dirty and drop it.
+ *
+ * However, this won't actually free any real
+ * memory, as the page will just be in the page cache
+ * somewhere, and as such we should just continue
+ * our scan.
+ *
+ * Basically, this just makes it possible for us to do
+ * some real work in the future in "refill_inactive()".
+ */
+ if (page->mapping)
+ goto drop_pte;
+ if (!PageDirty(page))
+ goto drop_pte;
+
+ /*
+ * Anonymous buffercache pages can be left behind by
+ * concurrent truncate and pagefault.
+ */
+ if (page->buffers)
+ goto preserve;
+
+ /*
+ * This is a dirty, swappable page. First of all,
+ * get a suitable swap entry for it, and make sure
+ * we have the swap cache set up to associate the
+ * page with that swap entry.
+ */
+ for (;;) {
+ entry = get_swap_page();
+ if (!entry.val)
+ break;
+ /* Add it to the swap cache and mark it dirty
+ * (adding to the page cache will clear the dirty
+ * and uptodate bits, so we need to do it again)
+ */
+ if (add_to_swap_cache(page, entry) == 0) {
+ SetPageUptodate(page);
+ set_page_dirty(page);
+ goto set_swap_pte;
+ }
+ /* Raced with "speculative" read_swap_cache_async */
+ swap_free(entry);
+ }
+
+ /* No swap space left */
+preserve:
+ set_pte(page_table, pte);
+ UnlockPage(page);
+ return 0;
+}
+
+/* mm->page_table_lock is held. mmap_sem is not held */
+static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count, zone_t * classzone)
+{
+ pte_t * pte;
+ unsigned long pmd_end;
+
+ if (pmd_none(*dir))
+ return count;
+ if (pmd_bad(*dir)) {
+ pmd_ERROR(*dir);
+ pmd_clear(dir);
+ return count;
+ }
+
+ pte = pte_offset(dir, address);
+
+ pmd_end = (address + PMD_SIZE) & PMD_MASK;
+ if (end > pmd_end)
+ end = pmd_end;
+
+ do {
+ if (pte_present(*pte)) {
+ struct page *page = pte_page(*pte);
+
+ if (VALID_PAGE(page) && !PageReserved(page)) {
+ count -= try_to_swap_out(mm, vma, address, pte, page, classzone);
+ if (!count) {
+ address += PAGE_SIZE;
+ break;
+ }
+ }
+ }
+ address += PAGE_SIZE;
+ pte++;
+ } while (address && (address < end));
+ mm->swap_address = address;
+ return count;
+}
+
+/* mm->page_table_lock is held. mmap_sem is not held */
+static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int count, zone_t * classzone)
+{
+ pmd_t * pmd;
+ unsigned long pgd_end;
+
+ if (pgd_none(*dir))
+ return count;
+ if (pgd_bad(*dir)) {
+ pgd_ERROR(*dir);
+ pgd_clear(dir);
+ return count;
+ }
+
+ pmd = pmd_offset(dir, address);
+
+ pgd_end = (address + PGDIR_SIZE) & PGDIR_MASK;
+ if (pgd_end && (end > pgd_end))
+ end = pgd_end;
+
+ do {
+ count = swap_out_pmd(mm, vma, pmd, address, end, count, classzone);
+ if (!count)
+ break;
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return count;
+}
+
+/* mm->page_table_lock is held. mmap_sem is not held */
+static inline int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int count, zone_t * classzone)
+{
+ pgd_t *pgdir;
+ unsigned long end;
+
+ /* Don't swap out areas which are reserved */
+ if (vma->vm_flags & VM_RESERVED)
+ return count;
+
+ pgdir = pgd_offset(mm, address);
+
+ end = vma->vm_end;
+ BUG_ON(address >= end);
+ do {
+ count = swap_out_pgd(mm, vma, pgdir, address, end, count, classzone);
+ if (!count)
+ break;
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ pgdir++;
+ } while (address && (address < end));
+ return count;
+}
+
+/* Placeholder for swap_out(): may be updated by fork.c:mmput() */
+struct mm_struct *swap_mm = &init_mm;
+
+/*
+ * Returns remaining count of pages to be swapped out by followup call.
+ */
+static inline int swap_out_mm(struct mm_struct * mm, int count, int * mmcounter, zone_t * classzone)
+{
+ unsigned long address;
+ struct vm_area_struct* vma;
+
+ /*
+ * Find the proper vm-area after freezing the vma chain
+ * and ptes.
+ */
+ spin_lock(&mm->page_table_lock);
+ address = mm->swap_address;
+ if (address == TASK_SIZE || swap_mm != mm) {
+ /* We raced: don't count this mm but try again */
+ ++*mmcounter;
+ goto out_unlock;
+ }
+ vma = find_vma(mm, address);
+ if (vma) {
+ if (address < vma->vm_start)
+ address = vma->vm_start;
+
+ for (;;) {
+ count = swap_out_vma(mm, vma, address, count, classzone);
+ vma = vma->vm_next;
+ if (!vma)
+ break;
+ if (!count)
+ goto out_unlock;
+ address = vma->vm_start;
+ }
+ }
+ /* Indicate that we reached the end of address space */
+ mm->swap_address = TASK_SIZE;
+
+out_unlock:
+ spin_unlock(&mm->page_table_lock);
+ return count;
+}
+
+#endif /* NO_MM */
+
+static int FASTCALL(swap_out(unsigned int priority, unsigned int gfp_mask, zone_t * classzone));
+static int swap_out(unsigned int priority, unsigned int gfp_mask, zone_t * classzone)
+{
+#ifndef NO_MM
+ int counter, nr_pages = SWAP_CLUSTER_MAX;
+ struct mm_struct *mm;
+
+	/* Walk the other mms on the mmlist, resuming from swap_mm */
+ counter = mmlist_nr;
+ do {
+ if (unlikely(current->need_resched)) {
+ __set_current_state(TASK_RUNNING);
+ schedule();
+ }
+
+ spin_lock(&mmlist_lock);
+ mm = swap_mm;
+ while (mm->swap_address == TASK_SIZE || mm == &init_mm) {
+ mm->swap_address = 0;
+ mm = list_entry(mm->mmlist.next, struct mm_struct, mmlist);
+ if (mm == swap_mm)
+ goto empty;
+ swap_mm = mm;
+ }
+
+ /* Make sure the mm doesn't disappear when we drop the lock.. */
+ atomic_inc(&mm->mm_users);
+ spin_unlock(&mmlist_lock);
+
+ nr_pages = swap_out_mm(mm, nr_pages, &counter, classzone);
+
+ mmput(mm);
+
+ if (!nr_pages)
+ return 1;
+ } while (--counter >= 0);
+
+ return 0;
+
+empty:
+ spin_unlock(&mmlist_lock);
+#endif /* NO_MM */
+ return 0;
+}
+
+static int FASTCALL(shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask, int priority));
+static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask, int priority)
+{
+ struct list_head * entry;
+ int max_scan = nr_inactive_pages / priority;
+ int max_mapped = min((nr_pages << (10 - priority)), max_scan / 10);
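+	/*
+	 * At DEF_PRIORITY (6) this scans up to 1/6th of the inactive list
+	 * and tolerates min(nr_pages * 16, max_scan / 10) mapped pages
+	 * before falling back to swap_out(); both budgets grow as the
+	 * priority drops towards 1.
+	 */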
+
+ spin_lock(&pagemap_lru_lock);
+ while (--max_scan >= 0 && (entry = inactive_list.prev) != &inactive_list) {
+ struct page * page;
+
+ if (unlikely(current->need_resched)) {
+ spin_unlock(&pagemap_lru_lock);
+ __set_current_state(TASK_RUNNING);
+ schedule();
+ spin_lock(&pagemap_lru_lock);
+ continue;
+ }
+
+ page = list_entry(entry, struct page, lru);
+
+ BUG_ON(!PageLRU(page));
+ BUG_ON(PageActive(page));
+
+ list_del(entry);
+ list_add(entry, &inactive_list);
+
+ /*
+ * Zero page counts can happen because we unlink the pages
+ * _after_ decrementing the usage count..
+ */
+ if (unlikely(!page_count(page)))
+ continue;
+
+ if (!memclass(page_zone(page), classzone))
+ continue;
+
+ /* Racy check to avoid trylocking when not worthwhile */
+ if (!page->buffers && (page_count(page) != 1 || !page->mapping))
+ goto page_mapped;
+
+ /*
+ * The page is locked. IO in progress?
+ * Move it to the back of the list.
+ */
+ if (unlikely(TryLockPage(page))) {
+ if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
+ page_cache_get(page);
+ spin_unlock(&pagemap_lru_lock);
+ wait_on_page(page);
+ page_cache_release(page);
+ spin_lock(&pagemap_lru_lock);
+ }
+ continue;
+ }
+
+ if (PageDirty(page) && is_page_cache_freeable(page) && page->mapping) {
+ /*
+ * It is not critical here to write it only if
+			 * the page is unmapped because any direct writer
+			 * like O_DIRECT would set the PG_dirty bitflag
+			 * on the physical page after having successfully
+ * pinned it and after the I/O to the page is finished,
+ * so the direct writes to the page cannot get lost.
+ */
+ int (*writepage)(struct page *);
+
+ writepage = page->mapping->a_ops->writepage;
+ if ((gfp_mask & __GFP_FS) && writepage) {
+ ClearPageDirty(page);
+ SetPageLaunder(page);
+ page_cache_get(page);
+ spin_unlock(&pagemap_lru_lock);
+
+ writepage(page);
+ page_cache_release(page);
+
+ spin_lock(&pagemap_lru_lock);
+ continue;
+ }
+ }
+
+ /*
+ * If the page has buffers, try to free the buffer mappings
+ * associated with this page. If we succeed we try to free
+ * the page as well.
+ */
+ if (page->buffers) {
+ spin_unlock(&pagemap_lru_lock);
+
+			/* avoid freeing a locked page */
+ page_cache_get(page);
+
+ if (try_to_release_page(page, gfp_mask)) {
+ if (!page->mapping) {
+ /*
+ * We must not allow an anon page
+ * with no buffers to be visible on
+ * the LRU, so we unlock the page after
+ * taking the lru lock
+ */
+ spin_lock(&pagemap_lru_lock);
+ UnlockPage(page);
+ __lru_cache_del(page);
+
+ /* effectively free the page here */
+ page_cache_release(page);
+
+ if (--nr_pages)
+ continue;
+ break;
+ } else {
+ /*
+				 * The page is still in the pagecache, so undo
+				 * what was done before try_to_release_page()
+				 * (drop the extra reference and retake the lru
+				 * lock): we've not finished with it and can now
+				 * try the next step.
+ */
+ page_cache_release(page);
+
+ spin_lock(&pagemap_lru_lock);
+ }
+ } else {
+ /* failed to drop the buffers so stop here */
+ UnlockPage(page);
+ page_cache_release(page);
+
+ spin_lock(&pagemap_lru_lock);
+ continue;
+ }
+ }
+
+ spin_lock(&pagecache_lock);
+
+ /*
+ * this is the non-racy check for busy page.
+ */
+ if (!page->mapping || !is_page_cache_freeable(page)) {
+ spin_unlock(&pagecache_lock);
+ UnlockPage(page);
+page_mapped:
+ if (--max_mapped >= 0)
+ continue;
+
+ /*
+ * Alert! We've found too many mapped pages on the
+ * inactive list, so we start swapping out now!
+ */
+ spin_unlock(&pagemap_lru_lock);
+ swap_out(priority, gfp_mask, classzone);
+ return nr_pages;
+ }
+
+ /*
+ * It is critical to check PageDirty _after_ we made sure
+		 * the page is freeable, so not in use by anybody.
+ */
+ if (PageDirty(page)) {
+ spin_unlock(&pagecache_lock);
+ UnlockPage(page);
+ continue;
+ }
+
+ /* point of no return */
+#ifdef NO_MM
+ __remove_inode_page(page);
+ spin_unlock(&pagecache_lock);
+#else /* !NO_MM */
+ if (likely(!PageSwapCache(page))) {
+ __remove_inode_page(page);
+ spin_unlock(&pagecache_lock);
+ } else {
+ swp_entry_t swap;
+ swap.val = page->index;
+ __delete_from_swap_cache(page);
+ /* must drop lru lock if getting swap_list lock */
+ spin_unlock(&pagecache_lock);
+ swap_free(swap);
+ }
+#endif /* !NO_MM */
+
+ __lru_cache_del(page);
+ UnlockPage(page);
+
+ /* effectively free the page here */
+ page_cache_release(page);
+
+ if (--nr_pages)
+ continue;
+ break;
+ }
+ spin_unlock(&pagemap_lru_lock);
+
+ return nr_pages;
+}
+
+/*
+ * This moves pages from the active list to
+ * the inactive list.
+ *
+ * We move them the other way when we see the
+ * reference bit on the page.
+ */
+static void refill_inactive(int nr_pages)
+{
+ struct list_head * entry;
+
+ spin_lock(&pagemap_lru_lock);
+ entry = active_list.prev;
+ while (nr_pages && entry != &active_list) {
+ struct page * page;
+
+ page = list_entry(entry, struct page, lru);
+ entry = entry->prev;
+ if (PageTestandClearReferenced(page)) {
+ list_del(&page->lru);
+ list_add(&page->lru, &active_list);
+ continue;
+ }
+
+ nr_pages--;
+
+ del_page_from_active_list(page);
+ add_page_to_inactive_list(page);
+ SetPageReferenced(page);
+ }
+ spin_unlock(&pagemap_lru_lock);
+}
+
+static int FASTCALL(shrink_caches(zone_t * classzone, int priority, unsigned int gfp_mask, int nr_pages));
+static int shrink_caches(zone_t * classzone, int priority, unsigned int gfp_mask, int nr_pages)
+{
+ int chunk_size = nr_pages;
+ unsigned long ratio;
+
+ nr_pages -= kmem_cache_reap(gfp_mask);
+ if (nr_pages <= 0)
+ return 0;
+
+ nr_pages = chunk_size;
+ /* try to keep the active list 2/3 of the size of the cache */
+ ratio = (unsigned long) nr_pages * nr_active_pages / ((nr_inactive_pages + 1) * 2);
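+	/*
+	 * e.g. with nr_pages == 32, 6000 active and 3000 inactive pages,
+	 * ratio comes out at about 32, so roughly one chunk's worth of
+	 * pages is deactivated; if the active list grows beyond twice the
+	 * inactive list, proportionally more pages are moved.
+	 */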
+ refill_inactive(ratio);
+
+ nr_pages = shrink_cache(nr_pages, classzone, gfp_mask, priority);
+ if (nr_pages <= 0)
+ return 0;
+
+ shrink_dcache_memory(priority, gfp_mask);
+ shrink_icache_memory(priority, gfp_mask);
+#ifdef CONFIG_QUOTA
+ shrink_dqcache_memory(DEF_PRIORITY, gfp_mask);
+#endif
+
+ return nr_pages;
+}
+
+int try_to_free_pages_zone(zone_t *classzone, unsigned int gfp_mask)
+{
+ int priority = DEF_PRIORITY;
+ int nr_pages = SWAP_CLUSTER_MAX;
+
+ gfp_mask = pf_gfp_mask(gfp_mask);
+ do {
+ nr_pages = shrink_caches(classzone, priority, gfp_mask, nr_pages);
+ if (nr_pages <= 0)
+ return 1;
+ } while (--priority);
+
+#ifndef CONFIG_CONTIGUOUS_PAGE_ALLOC
+ /*
+ * Hmm.. Cache shrink failed - time to kill something?
+ * Mhwahahhaha! This is the part I really like. Giggle.
+ */
+ out_of_memory();
+#endif
+ return 0;
+}
+
+int try_to_free_pages(unsigned int gfp_mask)
+{
+ pg_data_t *pgdat;
+ zonelist_t *zonelist;
+ unsigned long pf_free_pages;
+ int error = 0;
+
+ pf_free_pages = current->flags & PF_FREE_PAGES;
+ current->flags &= ~PF_FREE_PAGES;
+
+ for_each_pgdat(pgdat) {
+ zonelist = pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK);
+ error |= try_to_free_pages_zone(zonelist->zones[0], gfp_mask);
+ }
+
+ current->flags |= pf_free_pages;
+ return error;
+}
+
+DECLARE_WAIT_QUEUE_HEAD(kswapd_wait);
+
+static int check_classzone_need_balance(zone_t * classzone)
+{
+ zone_t * first_classzone;
+
+ first_classzone = classzone->zone_pgdat->node_zones;
+ while (classzone >= first_classzone) {
+ if (classzone->free_pages > classzone->pages_high)
+ return 0;
+ classzone--;
+ }
+ return 1;
+}
+
+static int kswapd_balance_pgdat(pg_data_t * pgdat)
+{
+ int need_more_balance = 0, i;
+ zone_t * zone;
+
+ for (i = pgdat->nr_zones-1; i >= 0; i--) {
+ zone = pgdat->node_zones + i;
+ if (unlikely(current->need_resched))
+ schedule();
+ if (!zone->need_balance)
+ continue;
+ if (!try_to_free_pages_zone(zone, GFP_KSWAPD)) {
+ zone->need_balance = 0;
+#ifndef CONFIG_CONTIGUOUS_PAGE_ALLOC /* we always want the memory now !! */
+ __set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+#endif
+ continue;
+ }
+ if (check_classzone_need_balance(zone))
+ need_more_balance = 1;
+ else
+ zone->need_balance = 0;
+ }
+
+ return need_more_balance;
+}
+
+static void kswapd_balance(void)
+{
+ int need_more_balance;
+ pg_data_t * pgdat;
+
+ do {
+ need_more_balance = 0;
+
+ for_each_pgdat(pgdat)
+ need_more_balance |= kswapd_balance_pgdat(pgdat);
+ } while (need_more_balance);
+}
+
+static int kswapd_can_sleep_pgdat(pg_data_t * pgdat)
+{
+ zone_t * zone;
+ int i;
+
+ for (i = pgdat->nr_zones-1; i >= 0; i--) {
+ zone = pgdat->node_zones + i;
+ if (!zone->need_balance)
+ continue;
+ return 0;
+ }
+
+ return 1;
+}
+
+static int kswapd_can_sleep(void)
+{
+ pg_data_t * pgdat;
+
+ for_each_pgdat(pgdat) {
+ if (!kswapd_can_sleep_pgdat(pgdat))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * The background pageout daemon, started as a kernel thread
+ * from the init process.
+ *
+ * This basically trickles out pages so that we have _some_
+ * free memory available even if there is no other activity
+ * that frees anything up. This is needed for things like routing
+ * etc, where we otherwise might have all activity going on in
+ * asynchronous contexts that cannot page things out.
+ *
+ * If there are applications that are active memory-allocators
+ * (most normal use), this basically shouldn't matter.
+ */
+int kswapd(void *unused)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+ daemonize();
+ strcpy(tsk->comm, "kswapd");
+ sigfillset(&tsk->blocked);
+
+ /*
+ * Tell the memory management that we're a "memory allocator",
+ * and that if we need more memory we should get access to it
+ * regardless (see "__alloc_pages()"). "kswapd" should
+ * never get caught in the normal page freeing logic.
+ *
+ * (Kswapd normally doesn't need memory anyway, but sometimes
+ * you need a small amount of memory in order to be able to
+ * page out something else, and this flag essentially protects
+ * us from recursively trying to free more memory as we're
+ * trying to free the first piece of memory in the first place).
+ */
+ tsk->flags |= PF_MEMALLOC;
+
+ /*
+ * Kswapd main loop.
+ */
+ for (;;) {
+ __set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kswapd_wait, &wait);
+
+ mb();
+ if (kswapd_can_sleep())
+ schedule();
+
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kswapd_wait, &wait);
+
+ /*
+ * If we actually get into a low-memory situation,
+ * the processes needing more memory will wake us
+ * up on a more timely basis.
+ */
+ kswapd_balance();
+ run_task_queue(&tq_disk);
+ }
+}
+
+static int __init kswapd_init(void)
+{
+ printk("Starting kswapd\n");
+ swap_setup();
+ kernel_thread(kswapd, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
+ return 0;
+}
+
+module_init(kswapd_init)