/*
 *  linux/mmnommu/memory.c
 *
 *  Copyright (c) 2000-2005 SnapGear Inc., David McCullough
 *  Copyright (c) 2000 Lineo, Inc., David McCullough
 */

/* Include list reconstructed from the symbols used below (kiobuf,
 * page cache, BKL, EXPORT_SYMBOL); the original header names were
 * lost from this copy. */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/iobuf.h>
#include <asm/pgtable.h>

void *high_memory;
mem_map_t *mem_map = NULL;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long num_mappedpages;
unsigned long askedalloc, realalloc;

/*
 * Force in an entire range of pages from the current process's user VA,
 * and pin them in physical memory.
 *
 * With no MMU the user VA already is physical memory, so there is
 * nothing to fault in or pin.
 */
int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
{
	return 0;
}

/*
 * Mark all of the pages in a kiobuf as dirty.
 *
 * We need to be able to deal with short reads from disk: if an IO error
 * occurs, the number of bytes read into memory may be less than the
 * size of the kiobuf, so we have to stop marking pages dirty once the
 * requested byte count has been reached.
 *
 * Must be called from process context - set_page_dirty() takes VFS locks.
 */
void mark_dirty_kiobuf(struct kiobuf *iobuf, int bytes)
{
	int index, offset, remaining;
	struct page *page;

	index = iobuf->offset >> PAGE_SHIFT;
	offset = iobuf->offset & ~PAGE_MASK;
	remaining = bytes;
	if (remaining > iobuf->length)
		remaining = iobuf->length;

	while (remaining > 0 && index < iobuf->nr_pages) {
		page = iobuf->maplist[index];

		if (!PageReserved(page))
			set_page_dirty(page);

		remaining -= (PAGE_SIZE - offset);
		offset = 0;
		index++;
	}
}

/*
 * Unmap all of the pages referenced by a kiobuf.  We release the pages,
 * and unlock them if they were locked.  A no-op here: the pages were
 * never mapped or locked in the first place.
 */
void unmap_kiobuf(struct kiobuf *iobuf)
{
}

/*
 * Lock down all of the pages of a kiovec for IO.
 *
 * If any page is mapped twice in the kiovec, we return the error -EINVAL.
 *
 * The optional wait parameter causes the lock call to block until all
 * pages can be locked if set.  If wait==0, the lock operation is
 * aborted if any locked pages are found and -EAGAIN is returned.
 */
int lock_kiovec(int nr, struct kiobuf *iovec[], int wait)
{
	return 0;
}

/*
 * Unlock all of the pages of a kiovec after IO.
 */
int unlock_kiovec(int nr, struct kiobuf *iovec[])
{
	return 0;
}

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	inode->i_size = offset;
	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out;
	inode->i_size = offset;

out_truncate:
	if (inode->i_op && inode->i_op->truncate) {
		lock_kernel();
		inode->i_op->truncate(inode);
		unlock_kernel();
	}
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out:
	return -EFBIG;
}
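/*
 * Illustrative sketch, not part of the original file: how a filesystem's
 * setattr path typically drives vmtruncate() above when a size change is
 * requested.  "example_setattr" is a hypothetical name; the pattern
 * broadly follows what inode_setattr() did in 2.4-era fs/attr.c.
 */
#if 0	/* example only */
static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error = 0;

	if (attr->ia_valid & ATTR_SIZE) {
		/* Shrinking drops cached pages past the new size;
		 * growing is checked against RLIMIT_FSIZE (SIGXFSZ)
		 * and s_maxbytes (-EFBIG), as coded in vmtruncate(). */
		error = vmtruncate(inode, attr->ia_size);
	}
	return error;
}
#endif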
/*
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_page_range(unsigned long from, unsigned long phys_addr,
		     unsigned long size, pgprot_t prot)
{
	/* No page tables to remap without an MMU. */
	return -EPERM;
}

/*
 * The nommu dodgy version :-)
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		   unsigned long start, int len, int write, int force,
		   struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	static struct vm_area_struct dummy_vma;

	for (i = 0; i < len; i++) {
		if (pages) {
			/* User addresses are physical, so the page can
			 * be looked up directly. */
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = &dummy_vma;
		start += PAGE_SIZE;
	}
	return i;
}

EXPORT_SYMBOL(get_user_pages);

/*
 * With no MMU, vmalloc memory is not remapped, so the page is derived
 * from the address just as for any other kernel address.
 */
struct page *vmalloc_to_page(void *vmalloc_addr)
{
	return virt_to_page(vmalloc_addr);
}
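/*
 * Illustrative sketch, not part of the original file: how a driver might
 * call the nommu get_user_pages() above to look up the pages backing a
 * user buffer before doing IO on them.  "example_pin_user_buffer" is a
 * hypothetical name; pages come back with an extra reference taken by
 * page_cache_get() and should be dropped with page_cache_release() when
 * the IO completes.
 */
#if 0	/* example only */
static int example_pin_user_buffer(unsigned long uaddr, size_t len,
				   struct page **pages)
{
	int nr_pages = ((uaddr & ~PAGE_MASK) + len + PAGE_SIZE - 1)
			>> PAGE_SHIFT;

	/* tsk/mm are accepted for API compatibility but unused above;
	 * write/force likewise have no effect without an MMU. */
	return get_user_pages(current, current->mm, uaddr & PAGE_MASK,
			      nr_pages, 1 /* write */, 0 /* force */,
			      pages, NULL);
}
#endif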