author    Samuel Thibault <samuel.thibault@ens-lyon.org>  2011-05-08 23:11:02 +0200
committer Samuel Thibault <samuel.thibault@ens-lyon.org>  2011-05-08 23:11:02 +0200
commit    cded208c7ea6d107dcbfdb2e2d4622daf41c2886 (patch)
tree      a04a03736b0a928c2954382f924aadb105ee39cc /libdde_linux26/contrib/mm
parent    fc82e00ca1e174cb961dea6ad37622e9b26cd899 (diff)
remove .svn directories
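
The files deleted below are Subversion working-copy metadata that was accidentally imported into the git tree. As a minimal sketch only (not necessarily how this commit was actually produced), stray .svn directories can be pruned from a checkout like this:

    # Hypothetical cleanup helper; illustration of the operation, not the
    # command used for this commit.
    import shutil
    from pathlib import Path

    # Collect matches first so we do not mutate the tree mid-traversal.
    svn_dirs = [p for p in Path(".").rglob(".svn") if p.is_dir()]
    for d in svn_dirs:
        shutil.rmtree(d)  # remove the .svn directory and everything under it
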
Diffstat (limited to 'libdde_linux26/contrib/mm')
-rw-r--r--  libdde_linux26/contrib/mm/.svn/all-wcprops                          47
-rw-r--r--  libdde_linux26/contrib/mm/.svn/entries                             266
-rw-r--r--  libdde_linux26/contrib/mm/.svn/format                                1
-rw-r--r--  libdde_linux26/contrib/mm/.svn/text-base/backing-dev.c.svn-base    306
-rw-r--r--  libdde_linux26/contrib/mm/.svn/text-base/bounce.c.svn-base         301
-rw-r--r--  libdde_linux26/contrib/mm/.svn/text-base/dmapool.c.svn-base        504
-rw-r--r--  libdde_linux26/contrib/mm/.svn/text-base/internal.h.svn-base       285
-rw-r--r--  libdde_linux26/contrib/mm/.svn/text-base/mempool.c.svn-base        340
-rw-r--r--  libdde_linux26/contrib/mm/.svn/text-base/swap.c.svn-base           583
-rw-r--r--  libdde_linux26/contrib/mm/.svn/text-base/util.c.svn-base           208
10 files changed, 0 insertions(+), 2841 deletions(-)
diff --git a/libdde_linux26/contrib/mm/.svn/all-wcprops b/libdde_linux26/contrib/mm/.svn/all-wcprops
deleted file mode 100644
index fff54b69..00000000
--- a/libdde_linux26/contrib/mm/.svn/all-wcprops
+++ /dev/null
@@ -1,47 +0,0 @@
-K 25
-svn:wc:ra_dav:version-url
-V 61
-/repos/tudos/!svn/ver/457/trunk/l4/pkg/dde/linux26/contrib/mm
-END
-internal.h
-K 25
-svn:wc:ra_dav:version-url
-V 72
-/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/mm/internal.h
-END
-mempool.c
-K 25
-svn:wc:ra_dav:version-url
-V 71
-/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/mm/mempool.c
-END
-backing-dev.c
-K 25
-svn:wc:ra_dav:version-url
-V 75
-/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/mm/backing-dev.c
-END
-swap.c
-K 25
-svn:wc:ra_dav:version-url
-V 68
-/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/mm/swap.c
-END
-bounce.c
-K 25
-svn:wc:ra_dav:version-url
-V 70
-/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/mm/bounce.c
-END
-dmapool.c
-K 25
-svn:wc:ra_dav:version-url
-V 71
-/repos/tudos/!svn/ver/457/trunk/l4/pkg/dde/linux26/contrib/mm/dmapool.c
-END
-util.c
-K 25
-svn:wc:ra_dav:version-url
-V 68
-/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/mm/util.c
-END
diff --git a/libdde_linux26/contrib/mm/.svn/entries b/libdde_linux26/contrib/mm/.svn/entries
deleted file mode 100644
index 8aac477d..00000000
--- a/libdde_linux26/contrib/mm/.svn/entries
+++ /dev/null
@@ -1,266 +0,0 @@
-9
-
-dir
-465
-http://svn.tudos.org/repos/tudos/trunk/l4/pkg/dde/linux26/contrib/mm
-http://svn.tudos.org/repos/tudos
-
-
-
-2009-05-23T02:50:17.774710Z
-457
-l4check
-
-
-svn:special svn:externals svn:needs-lock
-
-
-
-
-
-
-
-
-
-
-
-a704ac0b-3a55-4d43-a2a9-7be6f07c34fb
-
-internal.h
-file
-
-
-
-
-2009-11-15T17:16:37.000000Z
-244524357a9853d68ece4490c535d228
-2009-05-20T14:32:55.606606Z
-455
-l4check
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-8040
-
-mempool.c
-file
-
-
-
-
-2009-11-15T17:16:37.000000Z
-b75b6524b041c7b6218c015891fad97f
-2009-05-20T14:32:55.606606Z
-455
-l4check
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-9244
-
-backing-dev.c
-file
-
-
-
-
-2009-11-15T17:16:37.000000Z
-bf13dc6b3d2c8293c2f2f9b6239a1b69
-2009-05-20T14:32:55.606606Z
-455
-l4check
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-7156
-
-swap.c
-file
-
-
-
-
-2009-11-15T17:16:37.000000Z
-252749864fbeb278e4d85cb6a4b5126e
-2009-05-20T14:32:55.606606Z
-455
-l4check
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-14844
-
-bounce.c
-file
-
-
-
-
-2009-11-15T17:16:37.000000Z
-b1e346c1e15c441302e465c28461b15c
-2009-05-20T14:32:55.606606Z
-455
-l4check
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-6661
-
-dmapool.c
-file
-
-
-
-
-2009-11-15T17:16:37.000000Z
-db1b3b75b5bd95500f420a475c22cf1a
-2009-05-23T02:50:17.774710Z
-457
-l4check
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-13164
-
-util.c
-file
-
-
-
-
-2009-11-15T17:16:37.000000Z
-735caf012a819db75b0fef563392d18c
-2009-05-20T14:32:55.606606Z
-455
-l4check
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-4343
-
diff --git a/libdde_linux26/contrib/mm/.svn/format b/libdde_linux26/contrib/mm/.svn/format
deleted file mode 100644
index ec635144..00000000
--- a/libdde_linux26/contrib/mm/.svn/format
+++ /dev/null
@@ -1 +0,0 @@
-9
diff --git a/libdde_linux26/contrib/mm/.svn/text-base/backing-dev.c.svn-base b/libdde_linux26/contrib/mm/.svn/text-base/backing-dev.c.svn-base
deleted file mode 100644
index 8e858744..00000000
--- a/libdde_linux26/contrib/mm/.svn/text-base/backing-dev.c.svn-base
+++ /dev/null
@@ -1,306 +0,0 @@
-
-#include <linux/wait.h>
-#include <linux/backing-dev.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <linux/writeback.h>
-#include <linux/device.h>
-
-
-static struct class *bdi_class;
-
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-static struct dentry *bdi_debug_root;
-
-static void bdi_debug_init(void)
-{
- bdi_debug_root = debugfs_create_dir("bdi", NULL);
-}
-
-static int bdi_debug_stats_show(struct seq_file *m, void *v)
-{
- struct backing_dev_info *bdi = m->private;
- unsigned long background_thresh;
- unsigned long dirty_thresh;
- unsigned long bdi_thresh;
-
- get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
-
-#define K(x) ((x) << (PAGE_SHIFT - 10))
- seq_printf(m,
- "BdiWriteback: %8lu kB\n"
- "BdiReclaimable: %8lu kB\n"
- "BdiDirtyThresh: %8lu kB\n"
- "DirtyThresh: %8lu kB\n"
- "BackgroundThresh: %8lu kB\n",
- (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
- (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
- K(bdi_thresh),
- K(dirty_thresh),
- K(background_thresh));
-#undef K
-
- return 0;
-}
-
-static int bdi_debug_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, bdi_debug_stats_show, inode->i_private);
-}
-
-static const struct file_operations bdi_debug_stats_fops = {
- .open = bdi_debug_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
-{
- bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
- bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
- bdi, &bdi_debug_stats_fops);
-}
-
-static void bdi_debug_unregister(struct backing_dev_info *bdi)
-{
- debugfs_remove(bdi->debug_stats);
- debugfs_remove(bdi->debug_dir);
-}
-#else
-static inline void bdi_debug_init(void)
-{
-}
-static inline void bdi_debug_register(struct backing_dev_info *bdi,
- const char *name)
-{
-}
-static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
-{
-}
-#endif
-
-static ssize_t read_ahead_kb_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct backing_dev_info *bdi = dev_get_drvdata(dev);
- char *end;
- unsigned long read_ahead_kb;
- ssize_t ret = -EINVAL;
-
- read_ahead_kb = simple_strtoul(buf, &end, 10);
- if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
- bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
- ret = count;
- }
- return ret;
-}
-
-#define K(pages) ((pages) << (PAGE_SHIFT - 10))
-
-#define BDI_SHOW(name, expr) \
-static ssize_t name##_show(struct device *dev, \
- struct device_attribute *attr, char *page) \
-{ \
- struct backing_dev_info *bdi = dev_get_drvdata(dev); \
- \
- return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr); \
-}
-
-BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
-
-static ssize_t min_ratio_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct backing_dev_info *bdi = dev_get_drvdata(dev);
- char *end;
- unsigned int ratio;
- ssize_t ret = -EINVAL;
-
- ratio = simple_strtoul(buf, &end, 10);
- if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
- ret = bdi_set_min_ratio(bdi, ratio);
- if (!ret)
- ret = count;
- }
- return ret;
-}
-BDI_SHOW(min_ratio, bdi->min_ratio)
-
-static ssize_t max_ratio_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct backing_dev_info *bdi = dev_get_drvdata(dev);
- char *end;
- unsigned int ratio;
- ssize_t ret = -EINVAL;
-
- ratio = simple_strtoul(buf, &end, 10);
- if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
- ret = bdi_set_max_ratio(bdi, ratio);
- if (!ret)
- ret = count;
- }
- return ret;
-}
-BDI_SHOW(max_ratio, bdi->max_ratio)
-
-#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
-
-static struct device_attribute bdi_dev_attrs[] = {
- __ATTR_RW(read_ahead_kb),
- __ATTR_RW(min_ratio),
- __ATTR_RW(max_ratio),
- __ATTR_NULL,
-};
-
-static __init int bdi_class_init(void)
-{
- bdi_class = class_create(THIS_MODULE, "bdi");
- bdi_class->dev_attrs = bdi_dev_attrs;
- bdi_debug_init();
- return 0;
-}
-
-postcore_initcall(bdi_class_init);
-
-int bdi_register(struct backing_dev_info *bdi, struct device *parent,
- const char *fmt, ...)
-{
- va_list args;
- int ret = 0;
- struct device *dev;
-
- if (bdi->dev) /* The driver needs to use separate queues per device */
- goto exit;
-
- va_start(args, fmt);
- dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
- va_end(args);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto exit;
- }
-
- bdi->dev = dev;
- bdi_debug_register(bdi, dev_name(dev));
-
-exit:
- return ret;
-}
-EXPORT_SYMBOL(bdi_register);
-
-int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
-{
- return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
-}
-EXPORT_SYMBOL(bdi_register_dev);
-
-void bdi_unregister(struct backing_dev_info *bdi)
-{
- if (bdi->dev) {
- bdi_debug_unregister(bdi);
- device_unregister(bdi->dev);
- bdi->dev = NULL;
- }
-}
-EXPORT_SYMBOL(bdi_unregister);
-
-int bdi_init(struct backing_dev_info *bdi)
-{
- int i;
- int err;
-
- bdi->dev = NULL;
-
- bdi->min_ratio = 0;
- bdi->max_ratio = 100;
- bdi->max_prop_frac = PROP_FRAC_BASE;
-
- for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
- err = percpu_counter_init(&bdi->bdi_stat[i], 0);
- if (err)
- goto err;
- }
-
- bdi->dirty_exceeded = 0;
- err = prop_local_init_percpu(&bdi->completions);
-
- if (err) {
-err:
- while (i--)
- percpu_counter_destroy(&bdi->bdi_stat[i]);
- }
-
- return err;
-}
-EXPORT_SYMBOL(bdi_init);
-
-void bdi_destroy(struct backing_dev_info *bdi)
-{
- int i;
-
- bdi_unregister(bdi);
-
- for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
- percpu_counter_destroy(&bdi->bdi_stat[i]);
-
- prop_local_destroy_percpu(&bdi->completions);
-}
-EXPORT_SYMBOL(bdi_destroy);
-
-static wait_queue_head_t congestion_wqh[2] = {
- __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
- __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
- };
-
-
-void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
-{
- enum bdi_state bit;
- wait_queue_head_t *wqh = &congestion_wqh[rw];
-
- bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
- clear_bit(bit, &bdi->state);
- smp_mb__after_clear_bit();
- if (waitqueue_active(wqh))
- wake_up(wqh);
-}
-EXPORT_SYMBOL(clear_bdi_congested);
-
-void set_bdi_congested(struct backing_dev_info *bdi, int rw)
-{
- enum bdi_state bit;
-
- bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
- set_bit(bit, &bdi->state);
-}
-EXPORT_SYMBOL(set_bdi_congested);
-
-/**
- * congestion_wait - wait for a backing_dev to become uncongested
- * @rw: READ or WRITE
- * @timeout: timeout in jiffies
- *
- * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
- * write congestion. If no backing_devs are congested then just wait for the
- * next write to be completed.
- */
-long congestion_wait(int rw, long timeout)
-{
- long ret;
- DEFINE_WAIT(wait);
- wait_queue_head_t *wqh = &congestion_wqh[rw];
-
- prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
- ret = io_schedule_timeout(timeout);
- finish_wait(wqh, &wait);
- return ret;
-}
-EXPORT_SYMBOL(congestion_wait);
-
diff --git a/libdde_linux26/contrib/mm/.svn/text-base/bounce.c.svn-base b/libdde_linux26/contrib/mm/.svn/text-base/bounce.c.svn-base
deleted file mode 100644
index e590272f..00000000
--- a/libdde_linux26/contrib/mm/.svn/text-base/bounce.c.svn-base
+++ /dev/null
@@ -1,301 +0,0 @@
-/* bounce buffer handling for block devices
- *
- * - Split from highmem.c
- */
-
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/swap.h>
-#include <linux/bio.h>
-#include <linux/pagemap.h>
-#include <linux/mempool.h>
-#include <linux/blkdev.h>
-#include <linux/init.h>
-#include <linux/hash.h>
-#include <linux/highmem.h>
-#include <linux/blktrace_api.h>
-#include <trace/block.h>
-#include <asm/tlbflush.h>
-
-#define POOL_SIZE 64
-#define ISA_POOL_SIZE 16
-
-static mempool_t *page_pool, *isa_page_pool;
-
-DEFINE_TRACE(block_bio_bounce);
-
-#ifdef CONFIG_HIGHMEM
-static __init int init_emergency_pool(void)
-{
- struct sysinfo i;
- si_meminfo(&i);
- si_swapinfo(&i);
-
- if (!i.totalhigh)
- return 0;
-
- page_pool = mempool_create_page_pool(POOL_SIZE, 0);
- BUG_ON(!page_pool);
- printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
-
- return 0;
-}
-
-__initcall(init_emergency_pool);
-
-/*
- * highmem version, map in to vec
- */
-static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
-{
- unsigned long flags;
- unsigned char *vto;
-
- local_irq_save(flags);
- vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
- memcpy(vto + to->bv_offset, vfrom, to->bv_len);
- kunmap_atomic(vto, KM_BOUNCE_READ);
- local_irq_restore(flags);
-}
-
-#else /* CONFIG_HIGHMEM */
-
-#define bounce_copy_vec(to, vfrom) \
- memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
-
-#endif /* CONFIG_HIGHMEM */
-
-/*
- * allocate pages in the DMA region for the ISA pool
- */
-static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
-{
- return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
-}
-
-/*
- * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
- * as the max address, so check if the pool has already been created.
- */
-int init_emergency_isa_pool(void)
-{
- if (isa_page_pool)
- return 0;
-
- isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
- mempool_free_pages, (void *) 0);
- BUG_ON(!isa_page_pool);
-
- printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
- return 0;
-}
-
-/*
- * Simple bounce buffer support for highmem pages. Depending on the
- * queue gfp mask set, *to may or may not be a highmem page. kmap it
- * always, it will do the Right Thing
- */
-static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
-{
- unsigned char *vfrom;
- struct bio_vec *tovec, *fromvec;
- int i;
-
- __bio_for_each_segment(tovec, to, i, 0) {
- fromvec = from->bi_io_vec + i;
-
- /*
- * not bounced
- */
- if (tovec->bv_page == fromvec->bv_page)
- continue;
-
- /*
- * fromvec->bv_offset and fromvec->bv_len might have been
- * modified by the block layer, so use the original copy,
- * bounce_copy_vec already uses tovec->bv_len
- */
- vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
-
- flush_dcache_page(tovec->bv_page);
- bounce_copy_vec(tovec, vfrom);
- }
-}
-
-static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
-{
- struct bio *bio_orig = bio->bi_private;
- struct bio_vec *bvec, *org_vec;
- int i;
-
- if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
- set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
-
- /*
- * free up bounce indirect pages used
- */
- __bio_for_each_segment(bvec, bio, i, 0) {
- org_vec = bio_orig->bi_io_vec + i;
- if (bvec->bv_page == org_vec->bv_page)
- continue;
-
- dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
- mempool_free(bvec->bv_page, pool);
- }
-
- bio_endio(bio_orig, err);
- bio_put(bio);
-}
-
-static void bounce_end_io_write(struct bio *bio, int err)
-{
- bounce_end_io(bio, page_pool, err);
-}
-
-static void bounce_end_io_write_isa(struct bio *bio, int err)
-{
-
- bounce_end_io(bio, isa_page_pool, err);
-}
-
-static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
-{
- struct bio *bio_orig = bio->bi_private;
-
- if (test_bit(BIO_UPTODATE, &bio->bi_flags))
- copy_to_high_bio_irq(bio_orig, bio);
-
- bounce_end_io(bio, pool, err);
-}
-
-static void bounce_end_io_read(struct bio *bio, int err)
-{
- __bounce_end_io_read(bio, page_pool, err);
-}
-
-static void bounce_end_io_read_isa(struct bio *bio, int err)
-{
- __bounce_end_io_read(bio, isa_page_pool, err);
-}
-
-static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
- mempool_t *pool)
-{
- struct page *page;
- struct bio *bio = NULL;
- int i, rw = bio_data_dir(*bio_orig);
- struct bio_vec *to, *from;
-
- bio_for_each_segment(from, *bio_orig, i) {
- page = from->bv_page;
-
- /*
- * is destination page below bounce pfn?
- */
- if (page_to_pfn(page) <= q->bounce_pfn)
- continue;
-
- /*
- * irk, bounce it
- */
- if (!bio) {
- unsigned int cnt = (*bio_orig)->bi_vcnt;
-
- bio = bio_alloc(GFP_NOIO, cnt);
- memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
- }
-
-
- to = bio->bi_io_vec + i;
-
- to->bv_page = mempool_alloc(pool, q->bounce_gfp);
- to->bv_len = from->bv_len;
- to->bv_offset = from->bv_offset;
- inc_zone_page_state(to->bv_page, NR_BOUNCE);
-
- if (rw == WRITE) {
- char *vto, *vfrom;
-
- flush_dcache_page(from->bv_page);
- vto = page_address(to->bv_page) + to->bv_offset;
- vfrom = kmap(from->bv_page) + from->bv_offset;
- memcpy(vto, vfrom, to->bv_len);
- kunmap(from->bv_page);
- }
- }
-
- /*
- * no pages bounced
- */
- if (!bio)
- return;
-
- trace_block_bio_bounce(q, *bio_orig);
-
- /*
- * at least one page was bounced, fill in possible non-highmem
- * pages
- */
- __bio_for_each_segment(from, *bio_orig, i, 0) {
- to = bio_iovec_idx(bio, i);
- if (!to->bv_page) {
- to->bv_page = from->bv_page;
- to->bv_len = from->bv_len;
- to->bv_offset = from->bv_offset;
- }
- }
-
- bio->bi_bdev = (*bio_orig)->bi_bdev;
- bio->bi_flags |= (1 << BIO_BOUNCED);
- bio->bi_sector = (*bio_orig)->bi_sector;
- bio->bi_rw = (*bio_orig)->bi_rw;
-
- bio->bi_vcnt = (*bio_orig)->bi_vcnt;
- bio->bi_idx = (*bio_orig)->bi_idx;
- bio->bi_size = (*bio_orig)->bi_size;
-
- if (pool == page_pool) {
- bio->bi_end_io = bounce_end_io_write;
- if (rw == READ)
- bio->bi_end_io = bounce_end_io_read;
- } else {
- bio->bi_end_io = bounce_end_io_write_isa;
- if (rw == READ)
- bio->bi_end_io = bounce_end_io_read_isa;
- }
-
- bio->bi_private = *bio_orig;
- *bio_orig = bio;
-}
-
-void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
-{
- mempool_t *pool;
-
- /*
- * Data-less bio, nothing to bounce
- */
- if (!bio_has_data(*bio_orig))
- return;
-
- /*
- * for non-isa bounce case, just check if the bounce pfn is equal
- * to or bigger than the highest pfn in the system -- in that case,
- * don't waste time iterating over bio segments
- */
- if (!(q->bounce_gfp & GFP_DMA)) {
- if (q->bounce_pfn >= blk_max_pfn)
- return;
- pool = page_pool;
- } else {
- BUG_ON(!isa_page_pool);
- pool = isa_page_pool;
- }
-
- /*
- * slow path
- */
- __blk_queue_bounce(q, bio_orig, pool);
-}
-
-EXPORT_SYMBOL(blk_queue_bounce);
diff --git a/libdde_linux26/contrib/mm/.svn/text-base/dmapool.c.svn-base b/libdde_linux26/contrib/mm/.svn/text-base/dmapool.c.svn-base
deleted file mode 100644
index b1f0885d..00000000
--- a/libdde_linux26/contrib/mm/.svn/text-base/dmapool.c.svn-base
+++ /dev/null
@@ -1,504 +0,0 @@
-/*
- * DMA Pool allocator
- *
- * Copyright 2001 David Brownell
- * Copyright 2007 Intel Corporation
- * Author: Matthew Wilcox <willy@linux.intel.com>
- *
- * This software may be redistributed and/or modified under the terms of
- * the GNU General Public License ("GPL") version 2 as published by the
- * Free Software Foundation.
- *
- * This allocator returns small blocks of a given size which are DMA-able by
- * the given device. It uses the dma_alloc_coherent page allocator to get
- * new pages, then splits them up into blocks of the required size.
- * Many older drivers still have their own code to do this.
- *
- * The current design of this allocator is fairly simple. The pool is
- * represented by the 'struct dma_pool' which keeps a doubly-linked list of
- * allocated pages. Each page in the page_list is split into blocks of at
- * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
- * list of free blocks within the page. Used blocks aren't tracked, but we
- * keep a count of how many are currently allocated from each page.
- */
-
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/poison.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
-#define DMAPOOL_DEBUG 1
-#endif
-
-struct dma_pool { /* the pool */
- struct list_head page_list;
- spinlock_t lock;
- size_t size;
- struct device *dev;
- size_t allocation;
- size_t boundary;
- char name[32];
- wait_queue_head_t waitq;
- struct list_head pools;
-};
-
-struct dma_page { /* cacheable header for 'allocation' bytes */
- struct list_head page_list;
- void *vaddr;
- dma_addr_t dma;
- unsigned int in_use;
- unsigned int offset;
-};
-
-#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
-
-static DEFINE_MUTEX(pools_lock);
-
-static ssize_t
-show_pools(struct device *dev, struct device_attribute *attr, char *buf)
-{
- unsigned temp;
- unsigned size;
- char *next;
- struct dma_page *page;
- struct dma_pool *pool;
-
- next = buf;
- size = PAGE_SIZE;
-
- temp = scnprintf(next, size, "poolinfo - 0.1\n");
- size -= temp;
- next += temp;
-
- mutex_lock(&pools_lock);
- list_for_each_entry(pool, &dev->dma_pools, pools) {
- unsigned pages = 0;
- unsigned blocks = 0;
-
- list_for_each_entry(page, &pool->page_list, page_list) {
- pages++;
- blocks += page->in_use;
- }
-
- /* per-pool info, no real statistics yet */
- temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
- pool->name, blocks,
- pages * (pool->allocation / pool->size),
- pool->size, pages);
- size -= temp;
- next += temp;
- }
- mutex_unlock(&pools_lock);
-
- return PAGE_SIZE - size;
-}
-
-static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
-
-/**
- * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
- * @name: name of pool, for diagnostics
- * @dev: device that will be doing the DMA
- * @size: size of the blocks in this pool.
- * @align: alignment requirement for blocks; must be a power of two
- * @boundary: returned blocks won't cross this power of two boundary
- * Context: !in_interrupt()
- *
- * Returns a dma allocation pool with the requested characteristics, or
- * null if one can't be created. Given one of these pools, dma_pool_alloc()
- * may be used to allocate memory. Such memory will all have "consistent"
- * DMA mappings, accessible by the device and its driver without using
- * cache flushing primitives. The actual size of blocks allocated may be
- * larger than requested because of alignment.
- *
- * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
- * cross that size boundary. This is useful for devices which have
- * addressing restrictions on individual DMA transfers, such as not crossing
- * boundaries of 4KBytes.
- */
-struct dma_pool *dma_pool_create(const char *name, struct device *dev,
- size_t size, size_t align, size_t boundary)
-{
- struct dma_pool *retval;
- size_t allocation;
-
- if (align == 0) {
- align = 1;
- } else if (align & (align - 1)) {
- return NULL;
- }
-
- if (size == 0) {
- return NULL;
- } else if (size < 4) {
- size = 4;
- }
-
- if ((size % align) != 0)
- size = ALIGN(size, align);
-
- allocation = max_t(size_t, size, PAGE_SIZE);
-
- if (!boundary) {
- boundary = allocation;
- } else if ((boundary < size) || (boundary & (boundary - 1))) {
- return NULL;
- }
-
- retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
- if (!retval)
- return retval;
-
- strlcpy(retval->name, name, sizeof(retval->name));
-
- retval->dev = dev;
-
- INIT_LIST_HEAD(&retval->page_list);
- spin_lock_init(&retval->lock);
- retval->size = size;
- retval->boundary = boundary;
- retval->allocation = allocation;
- init_waitqueue_head(&retval->waitq);
-
- if (dev) {
- int ret;
-
- mutex_lock(&pools_lock);
- if (list_empty(&dev->dma_pools))
- ret = device_create_file(dev, &dev_attr_pools);
- else
- ret = 0;
- /* note: not currently insisting "name" be unique */
- if (!ret)
- list_add(&retval->pools, &dev->dma_pools);
- else {
- kfree(retval);
- retval = NULL;
- }
- mutex_unlock(&pools_lock);
- } else
- INIT_LIST_HEAD(&retval->pools);
-
- return retval;
-}
-EXPORT_SYMBOL(dma_pool_create);
-
-static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
-{
- unsigned int offset = 0;
- unsigned int next_boundary = pool->boundary;
-
- do {
- unsigned int next = offset + pool->size;
- if (unlikely((next + pool->size) >= next_boundary)) {
- next = next_boundary;
- next_boundary += pool->boundary;
- }
- *(int *)(page->vaddr + offset) = next;
- offset = next;
- } while (offset < pool->allocation);
-}
-
-static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
-{
- struct dma_page *page;
-
- page = kmalloc(sizeof(*page), mem_flags);
- if (!page)
- return NULL;
- page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
- &page->dma, mem_flags);
- if (page->vaddr) {
-#ifdef DMAPOOL_DEBUG
- memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
- pool_initialise_page(pool, page);
- list_add(&page->page_list, &pool->page_list);
- page->in_use = 0;
- page->offset = 0;
- } else {
- kfree(page);
- page = NULL;
- }
- return page;
-}
-
-static inline int is_page_busy(struct dma_page *page)
-{
- return page->in_use != 0;
-}
-
-static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
-{
- dma_addr_t dma = page->dma;
-
-#ifdef DMAPOOL_DEBUG
- memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
- dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
- list_del(&page->page_list);
- kfree(page);
-}
-
-/**
- * dma_pool_destroy - destroys a pool of dma memory blocks.
- * @pool: dma pool that will be destroyed
- * Context: !in_interrupt()
- *
- * Caller guarantees that no more memory from the pool is in use,
- * and that nothing will try to use the pool after this call.
- */
-void dma_pool_destroy(struct dma_pool *pool)
-{
- mutex_lock(&pools_lock);
- list_del(&pool->pools);
- if (pool->dev && list_empty(&pool->dev->dma_pools))
- device_remove_file(pool->dev, &dev_attr_pools);
- mutex_unlock(&pools_lock);
-
- while (!list_empty(&pool->page_list)) {
- struct dma_page *page;
- page = list_entry(pool->page_list.next,
- struct dma_page, page_list);
- if (is_page_busy(page)) {
- if (pool->dev)
- dev_err(pool->dev,
- "dma_pool_destroy %s, %p busy\n",
- pool->name, page->vaddr);
- else
- printk(KERN_ERR
- "dma_pool_destroy %s, %p busy\n",
- pool->name, page->vaddr);
- /* leak the still-in-use consistent memory */
- list_del(&page->page_list);
- kfree(page);
- } else
- pool_free_page(pool, page);
- }
-
- kfree(pool);
-}
-EXPORT_SYMBOL(dma_pool_destroy);
-
-/**
- * dma_pool_alloc - get a block of consistent memory
- * @pool: dma pool that will produce the block
- * @mem_flags: GFP_* bitmask
- * @handle: pointer to dma address of block
- *
- * This returns the kernel virtual address of a currently unused block,
- * and reports its dma address through the handle.
- * If such a memory block can't be allocated, %NULL is returned.
- */
-void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
- dma_addr_t *handle)
-{
- unsigned long flags;
- struct dma_page *page;
- size_t offset;
- void *retval;
-
- spin_lock_irqsave(&pool->lock, flags);
- restart:
- list_for_each_entry(page, &pool->page_list, page_list) {
- if (page->offset < pool->allocation)
- goto ready;
- }
- page = pool_alloc_page(pool, GFP_ATOMIC);
- if (!page) {
- if (mem_flags & __GFP_WAIT) {
- DECLARE_WAITQUEUE(wait, current);
-
- __set_current_state(TASK_INTERRUPTIBLE);
- __add_wait_queue(&pool->waitq, &wait);
- spin_unlock_irqrestore(&pool->lock, flags);
-
- schedule_timeout(POOL_TIMEOUT_JIFFIES);
-
- spin_lock_irqsave(&pool->lock, flags);
- __remove_wait_queue(&pool->waitq, &wait);
- goto restart;
- }
- retval = NULL;
- goto done;
- }
-
- ready:
- page->in_use++;
- offset = page->offset;
- page->offset = *(int *)(page->vaddr + offset);
- retval = offset + page->vaddr;
- *handle = offset + page->dma;
-#ifdef DMAPOOL_DEBUG
- memset(retval, POOL_POISON_ALLOCATED, pool->size);
-#endif
- done:
- spin_unlock_irqrestore(&pool->lock, flags);
- return retval;
-}
-EXPORT_SYMBOL(dma_pool_alloc);
-
-static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
-{
- unsigned long flags;
- struct dma_page *page;
-
- spin_lock_irqsave(&pool->lock, flags);
- list_for_each_entry(page, &pool->page_list, page_list) {
- if (dma < page->dma)
- continue;
- if (dma < (page->dma + pool->allocation))
- goto done;
- }
- page = NULL;
- done:
- spin_unlock_irqrestore(&pool->lock, flags);
- return page;
-}
-
-/**
- * dma_pool_free - put block back into dma pool
- * @pool: the dma pool holding the block
- * @vaddr: virtual address of block
- * @dma: dma address of block
- *
- * Caller promises neither device nor driver will again touch this block
- * unless it is first re-allocated.
- */
-void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
-{
- struct dma_page *page;
- unsigned long flags;
- unsigned int offset;
-
- page = pool_find_page(pool, dma);
- if (!page) {
- if (pool->dev)
- dev_err(pool->dev,
- "dma_pool_free %s, %p/%lx (bad dma)\n",
- pool->name, vaddr, (unsigned long)dma);
- else
- printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
- pool->name, vaddr, (unsigned long)dma);
- return;
- }
-
- offset = vaddr - page->vaddr;
-#ifdef DMAPOOL_DEBUG
- if ((dma - page->dma) != offset) {
- if (pool->dev)
- dev_err(pool->dev,
- "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
- pool->name, vaddr, (unsigned long long)dma);
- else
- printk(KERN_ERR
- "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
- pool->name, vaddr, (unsigned long long)dma);
- return;
- }
- {
- unsigned int chain = page->offset;
- while (chain < pool->allocation) {
- if (chain != offset) {
- chain = *(int *)(page->vaddr + chain);
- continue;
- }
- if (pool->dev)
- dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
- "already free\n", pool->name,
- (unsigned long long)dma);
- else
- printk(KERN_ERR "dma_pool_free %s, dma %Lx "
- "already free\n", pool->name,
- (unsigned long long)dma);
- return;
- }
- }
- memset(vaddr, POOL_POISON_FREED, pool->size);
-#endif
-
- spin_lock_irqsave(&pool->lock, flags);
- page->in_use--;
- *(int *)vaddr = page->offset;
- page->offset = offset;
- if (waitqueue_active(&pool->waitq))
- wake_up_locked(&pool->waitq);
- /*
- * Resist a temptation to do
- * if (!is_page_busy(page)) pool_free_page(pool, page);
- * Better have a few empty pages hang around.
- */
- spin_unlock_irqrestore(&pool->lock, flags);
-}
-EXPORT_SYMBOL(dma_pool_free);
-
-/*
- * Managed DMA pool
- */
-static void dmam_pool_release(struct device *dev, void *res)
-{
- struct dma_pool *pool = *(struct dma_pool **)res;
-
- dma_pool_destroy(pool);
-}
-
-static int dmam_pool_match(struct device *dev, void *res, void *match_data)
-{
- return *(struct dma_pool **)res == match_data;
-}
-
-/**
- * dmam_pool_create - Managed dma_pool_create()
- * @name: name of pool, for diagnostics
- * @dev: device that will be doing the DMA
- * @size: size of the blocks in this pool.
- * @align: alignment requirement for blocks; must be a power of two
- * @allocation: returned blocks won't cross this boundary (or zero)
- *
- * Managed dma_pool_create(). DMA pool created with this function is
- * automatically destroyed on driver detach.
- */
-struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
- size_t size, size_t align, size_t allocation)
-{
- struct dma_pool **ptr, *pool;
-
- ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
- if (pool)
- devres_add(dev, ptr);
- else
- devres_free(ptr);
-
- return pool;
-}
-EXPORT_SYMBOL(dmam_pool_create);
-
-/**
- * dmam_pool_destroy - Managed dma_pool_destroy()
- * @pool: dma pool that will be destroyed
- *
- * Managed dma_pool_destroy().
- */
-void dmam_pool_destroy(struct dma_pool *pool)
-{
- struct device *dev = pool->dev;
-
- dma_pool_destroy(pool);
- WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
-}
-EXPORT_SYMBOL(dmam_pool_destroy);
diff --git a/libdde_linux26/contrib/mm/.svn/text-base/internal.h.svn-base b/libdde_linux26/contrib/mm/.svn/text-base/internal.h.svn-base
deleted file mode 100644
index 478223b7..00000000
--- a/libdde_linux26/contrib/mm/.svn/text-base/internal.h.svn-base
+++ /dev/null
@@ -1,285 +0,0 @@
-/* internal.h: mm/ internal definitions
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef __MM_INTERNAL_H
-#define __MM_INTERNAL_H
-
-#include <linux/mm.h>
-
-void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
- unsigned long floor, unsigned long ceiling);
-
-extern void prep_compound_page(struct page *page, unsigned long order);
-extern void prep_compound_gigantic_page(struct page *page, unsigned long order);
-
-static inline void set_page_count(struct page *page, int v)
-{
- atomic_set(&page->_count, v);
-}
-
-/*
- * Turn a non-refcounted page (->_count == 0) into refcounted with
- * a count of one.
- */
-static inline void set_page_refcounted(struct page *page)
-{
- VM_BUG_ON(PageTail(page));
- VM_BUG_ON(atomic_read(&page->_count));
- set_page_count(page, 1);
-}
-
-static inline void __put_page(struct page *page)
-{
- atomic_dec(&page->_count);
-}
-
-/*
- * in mm/vmscan.c:
- */
-extern int isolate_lru_page(struct page *page);
-extern void putback_lru_page(struct page *page);
-
-/*
- * in mm/page_alloc.c
- */
-extern unsigned long highest_memmap_pfn;
-extern void __free_pages_bootmem(struct page *page, unsigned int order);
-
-/*
- * function for dealing with page's order in buddy system.
- * zone->lock is already acquired when we use these.
- * So, we don't need atomic page->flags operations here.
- */
-static inline unsigned long page_order(struct page *page)
-{
- VM_BUG_ON(!PageBuddy(page));
- return page_private(page);
-}
-
-extern long mlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end);
-extern void munlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end);
-static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
-{
- munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
-}
-
-#ifdef CONFIG_UNEVICTABLE_LRU
-/*
- * unevictable_migrate_page() called only from migrate_page_copy() to
- * migrate unevictable flag to new page.
- * Note that the old page has been isolated from the LRU lists at this
- * point so we don't need to worry about LRU statistics.
- */
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
- if (TestClearPageUnevictable(old))
- SetPageUnevictable(new);
-}
-#else
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
-}
-#endif
-
-#ifdef CONFIG_UNEVICTABLE_LRU
-/*
- * Called only in fault path via page_evictable() for a new page
- * to determine if it's being mapped into a LOCKED vma.
- * If so, mark page as mlocked.
- */
-static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
-{
- VM_BUG_ON(PageLRU(page));
-
- if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
- return 0;
-
- if (!TestSetPageMlocked(page)) {
- inc_zone_page_state(page, NR_MLOCK);
- count_vm_event(UNEVICTABLE_PGMLOCKED);
- }
- return 1;
-}
-
-/*
- * must be called with vma's mmap_sem held for read, and page locked.
- */
-extern void mlock_vma_page(struct page *page);
-
-/*
- * Clear the page's PageMlocked(). This can be useful in a situation where
- * we want to unconditionally remove a page from the pagecache -- e.g.,
- * on truncation or freeing.
- *
- * It is legal to call this function for any page, mlocked or not.
- * If called for a page that is still mapped by mlocked vmas, all we do
- * is revert to lazy LRU behaviour -- semantics are not broken.
- */
-extern void __clear_page_mlock(struct page *page);
-static inline void clear_page_mlock(struct page *page)
-{
- if (unlikely(TestClearPageMlocked(page)))
- __clear_page_mlock(page);
-}
-
-/*
- * mlock_migrate_page - called only from migrate_page_copy() to
- * migrate the Mlocked page flag; update statistics.
- */
-static inline void mlock_migrate_page(struct page *newpage, struct page *page)
-{
- if (TestClearPageMlocked(page)) {
- unsigned long flags;
-
- local_irq_save(flags);
- __dec_zone_page_state(page, NR_MLOCK);
- SetPageMlocked(newpage);
- __inc_zone_page_state(newpage, NR_MLOCK);
- local_irq_restore(flags);
- }
-}
-
-/*
- * free_page_mlock() -- clean up attempts to free and mlocked() page.
- * Page should not be on lru, so no need to fix that up.
- * free_pages_check() will verify...
- */
-static inline void free_page_mlock(struct page *page)
-{
- if (unlikely(TestClearPageMlocked(page))) {
- unsigned long flags;
-
- local_irq_save(flags);
- __dec_zone_page_state(page, NR_MLOCK);
- __count_vm_event(UNEVICTABLE_MLOCKFREED);
- local_irq_restore(flags);
- }
-}
-
-#else /* CONFIG_UNEVICTABLE_LRU */
-static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
-{
- return 0;
-}
-static inline void clear_page_mlock(struct page *page) { }
-static inline void mlock_vma_page(struct page *page) { }
-static inline void mlock_migrate_page(struct page *new, struct page *old) { }
-static inline void free_page_mlock(struct page *page) { }
-
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
-/*
- * Return the mem_map entry representing the 'offset' subpage within
- * the maximally aligned gigantic page 'base'. Handle any discontiguity
- * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
- */
-static inline struct page *mem_map_offset(struct page *base, int offset)
-{
- if (unlikely(offset >= MAX_ORDER_NR_PAGES))
- return pfn_to_page(page_to_pfn(base) + offset);
- return base + offset;
-}
-
-/*
- * Iterator over all subpages withing the maximally aligned gigantic
- * page 'base'. Handle any discontiguity in the mem_map.
- */
-static inline struct page *mem_map_next(struct page *iter,
- struct page *base, int offset)
-{
- if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
- unsigned long pfn = page_to_pfn(base) + offset;
- if (!pfn_valid(pfn))
- return NULL;
- return pfn_to_page(pfn);
- }
- return iter + 1;
-}
-
-/*
- * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
- * so all functions starting at paging_init should be marked __init
- * in those cases. SPARSEMEM, however, allows for memory hotplug,
- * and alloc_bootmem_node is not used.
- */
-#ifdef CONFIG_SPARSEMEM
-#define __paginginit __meminit
-#else
-#define __paginginit __init
-#endif
-
-/* Memory initialisation debug and verification */
-enum mminit_level {
- MMINIT_WARNING,
- MMINIT_VERIFY,
- MMINIT_TRACE
-};
-
-#ifdef CONFIG_DEBUG_MEMORY_INIT
-
-extern int mminit_loglevel;
-
-#define mminit_dprintk(level, prefix, fmt, arg...) \
-do { \
- if (level < mminit_loglevel) { \
- printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
- printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
- } \
-} while (0)
-
-extern void mminit_verify_pageflags_layout(void);
-extern void mminit_verify_page_links(struct page *page,
- enum zone_type zone, unsigned long nid, unsigned long pfn);
-extern void mminit_verify_zonelist(void);
-
-#else
-
-static inline void mminit_dprintk(enum mminit_level level,
- const char *prefix, const char *fmt, ...)
-{
-}
-
-static inline void mminit_verify_pageflags_layout(void)
-{
-}
-
-static inline void mminit_verify_page_links(struct page *page,
- enum zone_type zone, unsigned long nid, unsigned long pfn)
-{
-}
-
-static inline void mminit_verify_zonelist(void)
-{
-}
-#endif /* CONFIG_DEBUG_MEMORY_INIT */
-
-/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
-#if defined(CONFIG_SPARSEMEM)
-extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
- unsigned long *end_pfn);
-#else
-static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
- unsigned long *end_pfn)
-{
-}
-#endif /* CONFIG_SPARSEMEM */
-
-#define GUP_FLAGS_WRITE 0x1
-#define GUP_FLAGS_FORCE 0x2
-#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
-#define GUP_FLAGS_IGNORE_SIGKILL 0x8
-
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int flags,
- struct page **pages, struct vm_area_struct **vmas);
-
-#endif
diff --git a/libdde_linux26/contrib/mm/.svn/text-base/mempool.c.svn-base b/libdde_linux26/contrib/mm/.svn/text-base/mempool.c.svn-base
deleted file mode 100644
index a46eb1b4..00000000
--- a/libdde_linux26/contrib/mm/.svn/text-base/mempool.c.svn-base
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * linux/mm/mempool.c
- *
- * memory buffer pool support. Such pools are mostly used
- * for guaranteed, deadlock-free memory allocations during
- * extreme VM load.
- *
- * started by Ingo Molnar, Copyright (C) 2001
- */
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mempool.h>
-#include <linux/blkdev.h>
-#include <linux/writeback.h>
-
-static void add_element(mempool_t *pool, void *element)
-{
- BUG_ON(pool->curr_nr >= pool->min_nr);
- pool->elements[pool->curr_nr++] = element;
-}
-
-static void *remove_element(mempool_t *pool)
-{
- BUG_ON(pool->curr_nr <= 0);
- return pool->elements[--pool->curr_nr];
-}
-
-static void free_pool(mempool_t *pool)
-{
- while (pool->curr_nr) {
- void *element = remove_element(pool);
- pool->free(element, pool->pool_data);
- }
- kfree(pool->elements);
- kfree(pool);
-}
-
-/**
- * mempool_create - create a memory pool
- * @min_nr: the minimum number of elements guaranteed to be
- * allocated for this pool.
- * @alloc_fn: user-defined element-allocation function.
- * @free_fn: user-defined element-freeing function.
- * @pool_data: optional private data available to the user-defined functions.
- *
- * this function creates and allocates a guaranteed size, preallocated
- * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
- * functions. This function might sleep. Both the alloc_fn() and the free_fn()
- * functions might sleep - as long as the mempool_alloc() function is not called
- * from IRQ contexts.
- */
-mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data)
-{
- return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,-1);
-}
-EXPORT_SYMBOL(mempool_create);
-
-mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data, int node_id)
-{
- mempool_t *pool;
- pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
- if (!pool)
- return NULL;
- pool->elements = kmalloc_node(min_nr * sizeof(void *),
- GFP_KERNEL, node_id);
- if (!pool->elements) {
- kfree(pool);
- return NULL;
- }
- spin_lock_init(&pool->lock);
- pool->min_nr = min_nr;
- pool->pool_data = pool_data;
- init_waitqueue_head(&pool->wait);
- pool->alloc = alloc_fn;
- pool->free = free_fn;
-
- /*
- * First pre-allocate the guaranteed number of buffers.
- */
- while (pool->curr_nr < pool->min_nr) {
- void *element;
-
- element = pool->alloc(GFP_KERNEL, pool->pool_data);
- if (unlikely(!element)) {
- free_pool(pool);
- return NULL;
- }
- add_element(pool, element);
- }
- return pool;
-}
-EXPORT_SYMBOL(mempool_create_node);
-
-/**
- * mempool_resize - resize an existing memory pool
- * @pool: pointer to the memory pool which was allocated via
- * mempool_create().
- * @new_min_nr: the new minimum number of elements guaranteed to be
- * allocated for this pool.
- * @gfp_mask: the usual allocation bitmask.
- *
- * This function shrinks/grows the pool. In the case of growing,
- * it cannot be guaranteed that the pool will be grown to the new
- * size immediately, but new mempool_free() calls will refill it.
- *
- * Note, the caller must guarantee that no mempool_destroy is called
- * while this function is running. mempool_alloc() & mempool_free()
- * might be called (eg. from IRQ contexts) while this function executes.
- */
-int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
-{
- void *element;
- void **new_elements;
- unsigned long flags;
-
- BUG_ON(new_min_nr <= 0);
-
- spin_lock_irqsave(&pool->lock, flags);
- if (new_min_nr <= pool->min_nr) {
- while (new_min_nr < pool->curr_nr) {
- element = remove_element(pool);
- spin_unlock_irqrestore(&pool->lock, flags);
- pool->free(element, pool->pool_data);
- spin_lock_irqsave(&pool->lock, flags);
- }
- pool->min_nr = new_min_nr;
- goto out_unlock;
- }
- spin_unlock_irqrestore(&pool->lock, flags);
-
- /* Grow the pool */
- new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
- if (!new_elements)
- return -ENOMEM;
-
- spin_lock_irqsave(&pool->lock, flags);
- if (unlikely(new_min_nr <= pool->min_nr)) {
- /* Raced, other resize will do our work */
- spin_unlock_irqrestore(&pool->lock, flags);
- kfree(new_elements);
- goto out;
- }
- memcpy(new_elements, pool->elements,
- pool->curr_nr * sizeof(*new_elements));
- kfree(pool->elements);
- pool->elements = new_elements;
- pool->min_nr = new_min_nr;
-
- while (pool->curr_nr < pool->min_nr) {
- spin_unlock_irqrestore(&pool->lock, flags);
- element = pool->alloc(gfp_mask, pool->pool_data);
- if (!element)
- goto out;
- spin_lock_irqsave(&pool->lock, flags);
- if (pool->curr_nr < pool->min_nr) {
- add_element(pool, element);
- } else {
- spin_unlock_irqrestore(&pool->lock, flags);
- pool->free(element, pool->pool_data); /* Raced */
- goto out;
- }
- }
-out_unlock:
- spin_unlock_irqrestore(&pool->lock, flags);
-out:
- return 0;
-}
-EXPORT_SYMBOL(mempool_resize);
-
-/**
- * mempool_destroy - deallocate a memory pool
- * @pool: pointer to the memory pool which was allocated via
- * mempool_create().
- *
- * this function only sleeps if the free_fn() function sleeps. The caller
- * has to guarantee that all elements have been returned to the pool (ie:
- * freed) prior to calling mempool_destroy().
- */
-void mempool_destroy(mempool_t *pool)
-{
- /* Check for outstanding elements */
- BUG_ON(pool->curr_nr != pool->min_nr);
- free_pool(pool);
-}
-EXPORT_SYMBOL(mempool_destroy);
-
-/**
- * mempool_alloc - allocate an element from a specific memory pool
- * @pool: pointer to the memory pool which was allocated via
- * mempool_create().
- * @gfp_mask: the usual allocation bitmask.
- *
- * this function only sleeps if the alloc_fn() function sleeps or
- * returns NULL. Note that due to preallocation, this function
- * *never* fails when called from process contexts. (it might
- * fail if called from an IRQ context.)
- */
-void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
-{
- void *element;
- unsigned long flags;
- wait_queue_t wait;
- gfp_t gfp_temp;
-
- might_sleep_if(gfp_mask & __GFP_WAIT);
-
- gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */
- gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */
- gfp_mask |= __GFP_NOWARN; /* failures are OK */
-
- gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);
-
-repeat_alloc:
-
- element = pool->alloc(gfp_temp, pool->pool_data);
- if (likely(element != NULL))
- return element;
-
- spin_lock_irqsave(&pool->lock, flags);
- if (likely(pool->curr_nr)) {
- element = remove_element(pool);
- spin_unlock_irqrestore(&pool->lock, flags);
- return element;
- }
- spin_unlock_irqrestore(&pool->lock, flags);
-
- /* We must not sleep in the GFP_ATOMIC case */
- if (!(gfp_mask & __GFP_WAIT))
- return NULL;
-
- /* Now start performing page reclaim */
- gfp_temp = gfp_mask;
- init_wait(&wait);
- prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
- smp_mb();
- if (!pool->curr_nr) {
- /*
- * FIXME: this should be io_schedule(). The timeout is there
- * as a workaround for some DM problems in 2.6.18.
- */
- io_schedule_timeout(5*HZ);
- }
- finish_wait(&pool->wait, &wait);
-
- goto repeat_alloc;
-}
-EXPORT_SYMBOL(mempool_alloc);
-
-/**
- * mempool_free - return an element to the pool.
- * @element: pool element pointer.
- * @pool: pointer to the memory pool which was allocated via
- * mempool_create().
- *
- * this function only sleeps if the free_fn() function sleeps.
- */
-void mempool_free(void *element, mempool_t *pool)
-{
- unsigned long flags;
-
- if (unlikely(element == NULL))
- return;
-
- smp_mb();
- if (pool->curr_nr < pool->min_nr) {
- spin_lock_irqsave(&pool->lock, flags);
- if (pool->curr_nr < pool->min_nr) {
- add_element(pool, element);
- spin_unlock_irqrestore(&pool->lock, flags);
- wake_up(&pool->wait);
- return;
- }
- spin_unlock_irqrestore(&pool->lock, flags);
- }
- pool->free(element, pool->pool_data);
-}
-EXPORT_SYMBOL(mempool_free);
-
-/*
- * A commonly used alloc and free fn.
- */
-void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
-{
- struct kmem_cache *mem = pool_data;
- return kmem_cache_alloc(mem, gfp_mask);
-}
-EXPORT_SYMBOL(mempool_alloc_slab);
-
-void mempool_free_slab(void *element, void *pool_data)
-{
- struct kmem_cache *mem = pool_data;
- kmem_cache_free(mem, element);
-}
-EXPORT_SYMBOL(mempool_free_slab);
-
-/*
- * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
- * specified by pool_data
- */
-void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
-{
- size_t size = (size_t)(long)pool_data;
- return kmalloc(size, gfp_mask);
-}
-EXPORT_SYMBOL(mempool_kmalloc);
-
-void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data)
-{
- size_t size = (size_t) pool_data;
- return kzalloc(size, gfp_mask);
-}
-EXPORT_SYMBOL(mempool_kzalloc);
-
-void mempool_kfree(void *element, void *pool_data)
-{
- kfree(element);
-}
-EXPORT_SYMBOL(mempool_kfree);
-
-/*
- * A simple mempool-backed page allocator that allocates pages
- * of the order specified by pool_data.
- */
-void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
-{
- int order = (int)(long)pool_data;
- return alloc_pages(gfp_mask, order);
-}
-EXPORT_SYMBOL(mempool_alloc_pages);
-
-void mempool_free_pages(void *element, void *pool_data)
-{
- int order = (int)(long)pool_data;
- __free_pages(element, order);
-}
-EXPORT_SYMBOL(mempool_free_pages);
diff --git a/libdde_linux26/contrib/mm/.svn/text-base/swap.c.svn-base b/libdde_linux26/contrib/mm/.svn/text-base/swap.c.svn-base
deleted file mode 100644
index 8adb9feb..00000000
--- a/libdde_linux26/contrib/mm/.svn/text-base/swap.c.svn-base
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
- * linux/mm/swap.c
- *
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- */
-
-/*
- * This file contains the default values for the operation of the
- * Linux VM subsystem. Fine-tuning documentation can be found in
- * Documentation/sysctl/vm.txt.
- * Started 18.12.91
- * Swap aging added 23.2.95, Stephen Tweedie.
- * Buffermem limits added 12.3.98, Rik van Riel.
- */
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/swap.h>
-#include <linux/mman.h>
-#include <linux/pagemap.h>
-#include <linux/pagevec.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm_inline.h>
-#include <linux/buffer_head.h> /* for try_to_release_page() */
-#include <linux/percpu_counter.h>
-#include <linux/percpu.h>
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-#include <linux/backing-dev.h>
-#include <linux/memcontrol.h>
-
-#include "internal.h"
-
-/* How many pages do we try to swap or page in/out together? */
-int page_cluster;
-
-static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
-
-/*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs. But it gets used by networking.
- */
-static void __page_cache_release(struct page *page)
-{
- if (PageLRU(page)) {
- unsigned long flags;
- struct zone *zone = page_zone(page);
-
- spin_lock_irqsave(&zone->lru_lock, flags);
- VM_BUG_ON(!PageLRU(page));
- __ClearPageLRU(page);
- del_page_from_lru(zone, page);
- spin_unlock_irqrestore(&zone->lru_lock, flags);
- }
- free_hot_page(page);
-}
-
-static void put_compound_page(struct page *page)
-{
- page = compound_head(page);
- if (put_page_testzero(page)) {
- compound_page_dtor *dtor;
-
- dtor = get_compound_page_dtor(page);
- (*dtor)(page);
- }
-}
-
-void put_page(struct page *page)
-{
- if (unlikely(PageCompound(page)))
- put_compound_page(page);
- else if (put_page_testzero(page))
- __page_cache_release(page);
-}
-EXPORT_SYMBOL(put_page);
-
-/**
- * put_pages_list() - release a list of pages
- * @pages: list of pages threaded on page->lru
- *
- * Release a list of pages which are strung together on page.lru. Currently
- * used by read_cache_pages() and related error recovery code.
- */
-void put_pages_list(struct list_head *pages)
-{
- while (!list_empty(pages)) {
- struct page *victim;
-
- victim = list_entry(pages->prev, struct page, lru);
- list_del(&victim->lru);
- page_cache_release(victim);
- }
-}
-EXPORT_SYMBOL(put_pages_list);
-
-/*
- * pagevec_move_tail() must be called with IRQ disabled.
- * Otherwise this may cause nasty races.
- */
-static void pagevec_move_tail(struct pagevec *pvec)
-{
- int i;
- int pgmoved = 0;
- struct zone *zone = NULL;
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
- struct zone *pagezone = page_zone(page);
-
- if (pagezone != zone) {
- if (zone)
- spin_unlock(&zone->lru_lock);
- zone = pagezone;
- spin_lock(&zone->lru_lock);
- }
- if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- int lru = page_is_file_cache(page);
- list_move_tail(&page->lru, &zone->lru[lru].list);
- pgmoved++;
- }
- }
- if (zone)
- spin_unlock(&zone->lru_lock);
- __count_vm_events(PGROTATED, pgmoved);
- release_pages(pvec->pages, pvec->nr, pvec->cold);
- pagevec_reinit(pvec);
-}
-
-/*
- * Writeback is about to end against a page which has been marked for immediate
- * reclaim. If it still appears to be reclaimable, move it to the tail of the
- * inactive list.
- */
-void rotate_reclaimable_page(struct page *page)
-{
- if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
- !PageUnevictable(page) && PageLRU(page)) {
- struct pagevec *pvec;
- unsigned long flags;
-
- page_cache_get(page);
- local_irq_save(flags);
- pvec = &__get_cpu_var(lru_rotate_pvecs);
- if (!pagevec_add(pvec, page))
- pagevec_move_tail(pvec);
- local_irq_restore(flags);
- }
-}
-
-static void update_page_reclaim_stat(struct zone *zone, struct page *page,
- int file, int rotated)
-{
- struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
- struct zone_reclaim_stat *memcg_reclaim_stat;
-
- memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
-
- reclaim_stat->recent_scanned[file]++;
- if (rotated)
- reclaim_stat->recent_rotated[file]++;
-
- if (!memcg_reclaim_stat)
- return;
-
- memcg_reclaim_stat->recent_scanned[file]++;
- if (rotated)
- memcg_reclaim_stat->recent_rotated[file]++;
-}
-
-/*
- * FIXME: speed this up?
- */
-void activate_page(struct page *page)
-{
- struct zone *zone = page_zone(page);
-
- spin_lock_irq(&zone->lru_lock);
- if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- int file = page_is_file_cache(page);
- int lru = LRU_BASE + file;
- del_page_from_lru_list(zone, page, lru);
-
- SetPageActive(page);
- lru += LRU_ACTIVE;
- add_page_to_lru_list(zone, page, lru);
- __count_vm_event(PGACTIVATE);
-
- update_page_reclaim_stat(zone, page, !!file, 1);
- }
- spin_unlock_irq(&zone->lru_lock);
-}
-
-/*
- * Mark a page as having seen activity.
- *
- * inactive,unreferenced -> inactive,referenced
- * inactive,referenced -> active,unreferenced
- * active,unreferenced -> active,referenced
- */
-void mark_page_accessed(struct page *page)
-{
- if (!PageActive(page) && !PageUnevictable(page) &&
- PageReferenced(page) && PageLRU(page)) {
- activate_page(page);
- ClearPageReferenced(page);
- } else if (!PageReferenced(page)) {
- SetPageReferenced(page);
- }
-}
-
-EXPORT_SYMBOL(mark_page_accessed);
-
-void __lru_cache_add(struct page *page, enum lru_list lru)
-{
- struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
-
- page_cache_get(page);
- if (!pagevec_add(pvec, page))
- ____pagevec_lru_add(pvec, lru);
- put_cpu_var(lru_add_pvecs);
-}
-
-/**
- * lru_cache_add_lru - add a page to an LRU list
- * @page: the page to be added to the LRU.
- * @lru: the LRU list to which the page is added.
- */
-void lru_cache_add_lru(struct page *page, enum lru_list lru)
-{
- if (PageActive(page)) {
- VM_BUG_ON(PageUnevictable(page));
- ClearPageActive(page);
- } else if (PageUnevictable(page)) {
- VM_BUG_ON(PageActive(page));
- ClearPageUnevictable(page);
- }
-
- VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
- __lru_cache_add(page, lru);
-}
-
-/**
- * add_page_to_unevictable_list - add a page to the unevictable list
- * @page: the page to be added to the unevictable list
- *
- * Add page directly to its zone's unevictable list. To avoid races with
- * tasks that might be making the page evictable, through e.g. munlock,
- * munmap or exit, while it's not on the lru, we want to add the page
- * while it's locked or otherwise "invisible" to other tasks. This is
- * difficult to do when using the pagevec cache, so bypass that.
- */
-void add_page_to_unevictable_list(struct page *page)
-{
- struct zone *zone = page_zone(page);
-
- spin_lock_irq(&zone->lru_lock);
- SetPageUnevictable(page);
- SetPageLRU(page);
- add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
- spin_unlock_irq(&zone->lru_lock);
-}
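-
-/*
- * Illustrative sketch, not part of the original file: per the comment
- * above, the caller keeps the page "invisible" (here: locked) while
- * adding it, so no task can race to make it evictable off the LRU.
- */
-#if 0
-static void example_add_unevictable(struct page *page)
-{
-	lock_page(page);
-	add_page_to_unevictable_list(page);
-	unlock_page(page);
-}
-#endif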
-
-/*
- * Drain pages out of the cpu's pagevecs.
- * Either "cpu" is the current CPU, and preemption has already been
- * disabled; or "cpu" is being hot-unplugged, and is already dead.
- */
-static void drain_cpu_pagevecs(int cpu)
-{
- struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
- struct pagevec *pvec;
- int lru;
-
- for_each_lru(lru) {
- pvec = &pvecs[lru - LRU_BASE];
- if (pagevec_count(pvec))
- ____pagevec_lru_add(pvec, lru);
- }
-
- pvec = &per_cpu(lru_rotate_pvecs, cpu);
- if (pagevec_count(pvec)) {
- unsigned long flags;
-
- /* No harm done if a racing interrupt already did this */
- local_irq_save(flags);
- pagevec_move_tail(pvec);
- local_irq_restore(flags);
- }
-}
-
-void lru_add_drain(void)
-{
- drain_cpu_pagevecs(get_cpu());
- put_cpu();
-}
-
-static void lru_add_drain_per_cpu(struct work_struct *dummy)
-{
- lru_add_drain();
-}
-
-/*
- * Returns 0 for success
- */
-int lru_add_drain_all(void)
-{
- return schedule_on_each_cpu(lru_add_drain_per_cpu);
-}
-
-/*
- * Batched page_cache_release(). Decrement the reference count on all the
- * passed pages. If it fell to zero then remove the page from the LRU and
- * free it.
- *
- * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
- * for the remainder of the operation.
- *
- * The locking in this function is against shrink_inactive_list(): we recheck
- * the page count inside the lock to see whether shrink_inactive_list()
- * grabbed the page via the LRU. If it did, give up: shrink_inactive_list()
- * will free it.
- */
-void release_pages(struct page **pages, int nr, int cold)
-{
- int i;
- struct pagevec pages_to_free;
- struct zone *zone = NULL;
- unsigned long uninitialized_var(flags);
-
- pagevec_init(&pages_to_free, cold);
- for (i = 0; i < nr; i++) {
- struct page *page = pages[i];
-
- if (unlikely(PageCompound(page))) {
- if (zone) {
- spin_unlock_irqrestore(&zone->lru_lock, flags);
- zone = NULL;
- }
- put_compound_page(page);
- continue;
- }
-
- if (!put_page_testzero(page))
- continue;
-
- if (PageLRU(page)) {
- struct zone *pagezone = page_zone(page);
-
- if (pagezone != zone) {
- if (zone)
- spin_unlock_irqrestore(&zone->lru_lock,
- flags);
- zone = pagezone;
- spin_lock_irqsave(&zone->lru_lock, flags);
- }
- VM_BUG_ON(!PageLRU(page));
- __ClearPageLRU(page);
- del_page_from_lru(zone, page);
- }
-
- if (!pagevec_add(&pages_to_free, page)) {
- if (zone) {
- spin_unlock_irqrestore(&zone->lru_lock, flags);
- zone = NULL;
- }
- __pagevec_free(&pages_to_free);
- pagevec_reinit(&pages_to_free);
- }
- }
- if (zone)
- spin_unlock_irqrestore(&zone->lru_lock, flags);
-
- pagevec_free(&pages_to_free);
-}
-
-/*
- * The pages which we're about to release may be in the deferred lru-addition
- * queues. That would prevent them from really being freed right now. That's
- * OK from a correctness point of view but is inefficient - those pages may be
- * cache-warm and we want to give them back to the page allocator ASAP.
- *
- * So __pagevec_release() will drain those queues here. __pagevec_lru_add()
- * and __pagevec_lru_add_active() call release_pages() directly to avoid
- * mutual recursion.
- */
-void __pagevec_release(struct pagevec *pvec)
-{
- lru_add_drain();
- release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
- pagevec_reinit(pvec);
-}
-
-EXPORT_SYMBOL(__pagevec_release);
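-
-/*
- * Illustrative sketch, not part of the original file: the usual pagevec
- * batching idiom, flushing with __pagevec_release() whenever the vector
- * fills up. The caller must hold one reference per page.
- */
-#if 0
-static void example_release_batch(struct page **pages, int nr)
-{
-	struct pagevec pvec;
-	int i;
-
-	pagevec_init(&pvec, 0);		/* 0: pages are not cache-cold */
-	for (i = 0; i < nr; i++)
-		if (!pagevec_add(&pvec, pages[i]))
-			__pagevec_release(&pvec);	/* full: flush */
-	pagevec_release(&pvec);		/* release the remainder */
-}
-#endif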
-
-/*
- * Add the passed pages to the LRU, then drop the caller's refcount
- * on them. Reinitialises the caller's pagevec.
- */
-void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
-{
- int i;
- struct zone *zone = NULL;
-
- VM_BUG_ON(is_unevictable_lru(lru));
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
- struct zone *pagezone = page_zone(page);
- int file;
- int active;
-
- if (pagezone != zone) {
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
- zone = pagezone;
- spin_lock_irq(&zone->lru_lock);
- }
- VM_BUG_ON(PageActive(page));
- VM_BUG_ON(PageUnevictable(page));
- VM_BUG_ON(PageLRU(page));
- SetPageLRU(page);
- active = is_active_lru(lru);
- file = is_file_lru(lru);
- if (active)
- SetPageActive(page);
- update_page_reclaim_stat(zone, page, file, active);
- add_page_to_lru_list(zone, page, lru);
- }
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
- release_pages(pvec->pages, pvec->nr, pvec->cold);
- pagevec_reinit(pvec);
-}
-
-EXPORT_SYMBOL(____pagevec_lru_add);
-
-/*
- * Try to drop buffers from the pages in a pagevec
- */
-void pagevec_strip(struct pagevec *pvec)
-{
- int i;
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
-
- if (PagePrivate(page) && trylock_page(page)) {
- if (PagePrivate(page))
- try_to_release_page(page, 0);
- unlock_page(page);
- }
- }
-}
-
-/**
- * pagevec_swap_free - try to free swap space from the pages in a pagevec
- * @pvec: pagevec with swapcache pages to free the swap space of
- *
- * The caller needs to hold an extra reference to each page and
- * not hold the page lock on the pages. This function uses a
- * trylock on the page lock so it may not always free the swap
- * space associated with a page.
- */
-void pagevec_swap_free(struct pagevec *pvec)
-{
- int i;
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
-
- if (PageSwapCache(page) && trylock_page(page)) {
- try_to_free_swap(page);
- unlock_page(page);
- }
- }
-}
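-
-/*
- * Illustrative sketch, not part of the original file: freeing swap space
- * opportunistically for swapcache pages the caller holds extra references
- * to (and has not locked), per the rules above.
- */
-#if 0
-static void example_trim_swap(struct pagevec *pvec)
-{
-	pagevec_swap_free(pvec);	/* best-effort: trylock may fail */
-	pagevec_release(pvec);		/* drop the caller's references */
-}
-#endif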
-
-/**
- * pagevec_lookup - gang pagecache lookup
- * @pvec: Where the resulting pages are placed
- * @mapping: The address_space to search
- * @start: The starting page index
- * @nr_pages: The maximum number of pages
- *
- * pagevec_lookup() will search for and return a group of up to @nr_pages pages
- * in the mapping. The pages are placed in @pvec. pagevec_lookup() takes a
- * reference against the pages in @pvec.
- *
- * The search returns a group of mapping-contiguous pages with ascending
- * indexes. There may be holes in the indices due to not-present pages.
- *
- * pagevec_lookup() returns the number of pages which were found.
- */
-unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
- pgoff_t start, unsigned nr_pages)
-{
- pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
- return pagevec_count(pvec);
-}
-
-EXPORT_SYMBOL(pagevec_lookup);
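-
-/*
- * Illustrative sketch, not part of the original file: scanning a mapping
- * in PAGEVEC_SIZE batches. Since indices may have holes, the scan resumes
- * after the last page actually found, and the lookup references are
- * dropped with pagevec_release().
- */
-#if 0
-static void example_scan_mapping(struct address_space *mapping)
-{
-	struct pagevec pvec;
-	pgoff_t index = 0;
-	unsigned nr;
-
-	pagevec_init(&pvec, 0);
-	while ((nr = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE))) {
-		index = pvec.pages[nr - 1]->index + 1;
-		pagevec_release(&pvec);
-	}
-}
-#endif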
-
-unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
- pgoff_t *index, int tag, unsigned nr_pages)
-{
- pvec->nr = find_get_pages_tag(mapping, index, tag,
- nr_pages, pvec->pages);
- return pagevec_count(pvec);
-}
-
-EXPORT_SYMBOL(pagevec_lookup_tag);
-
-#ifdef CONFIG_SMP
-/*
- * We tolerate a little inaccuracy to avoid ping-ponging the counter between
- * CPUs
- */
-#define ACCT_THRESHOLD max(16, NR_CPUS * 2)
-
-static DEFINE_PER_CPU(long, committed_space);
-
-void vm_acct_memory(long pages)
-{
- long *local;
-
- preempt_disable();
- local = &__get_cpu_var(committed_space);
- *local += pages;
- if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
- atomic_long_add(*local, &vm_committed_space);
- *local = 0;
- }
- preempt_enable();
-}
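-
-/*
- * Illustrative sketch, not part of the original file: accounting is
- * symmetric, so a failed operation undoes its charge by accounting a
- * negative amount (the vm_unacct_memory() wrapper elsewhere in the tree
- * does exactly this).
- */
-#if 0
-static void example_charge_then_undo(long pages)
-{
-	vm_acct_memory(pages);		/* charge */
-	vm_acct_memory(-pages);		/* undo the charge on failure */
-}
-#endif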
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* Drop the CPU's cached committed space back into the central pool. */
-static int cpu_swap_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
-{
- long *committed;
-
- committed = &per_cpu(committed_space, (long)hcpu);
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
- atomic_long_add(*committed, &vm_committed_space);
- *committed = 0;
- drain_cpu_pagevecs((long)hcpu);
- }
- return NOTIFY_OK;
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-#endif /* CONFIG_SMP */
-
-/*
- * Perform any setup for the swap system
- */
-void __init swap_setup(void)
-{
- unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);
-
-#ifdef CONFIG_SWAP
- bdi_init(swapper_space.backing_dev_info);
-#endif
-
- /* Use a smaller cluster for small-memory machines */
- if (megs < 16)
- page_cluster = 2;
- else
- page_cluster = 3;
- /*
-	 * Right now other parts of the system mean that we
-	 * _really_ don't want to cluster much more.
- */
-#ifdef CONFIG_HOTPLUG_CPU
- hotcpu_notifier(cpu_swap_callback, 0);
-#endif
-}
diff --git a/libdde_linux26/contrib/mm/.svn/text-base/util.c.svn-base b/libdde_linux26/contrib/mm/.svn/text-base/util.c.svn-base
deleted file mode 100644
index 37eaccdf..00000000
--- a/libdde_linux26/contrib/mm/.svn/text-base/util.c.svn-base
+++ /dev/null
@@ -1,208 +0,0 @@
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <asm/uaccess.h>
-
-/**
- * kstrdup - allocate space for and copy an existing string
- * @s: the string to duplicate
- * @gfp: the GFP mask used in the kmalloc() call when allocating memory
- */
-char *kstrdup(const char *s, gfp_t gfp)
-{
- size_t len;
- char *buf;
-
- if (!s)
- return NULL;
-
- len = strlen(s) + 1;
- buf = kmalloc_track_caller(len, gfp);
- if (buf)
- memcpy(buf, s, len);
- return buf;
-}
-EXPORT_SYMBOL(kstrdup);
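-
-/*
- * Illustrative sketch, not part of the original file: the common kstrdup()
- * pattern of duplicating a caller-supplied name and checking for
- * allocation failure. The helper name is hypothetical.
- */
-#if 0
-static char *example_save_name(const char *name)
-{
-	char *copy = kstrdup(name, GFP_KERNEL);
-
-	if (!copy)
-		return NULL;	/* allocation failed */
-	return copy;		/* caller must kfree() this */
-}
-#endif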
-
-/**
- * kstrndup - allocate space for and copy an existing string
- * @s: the string to duplicate
- * @max: read at most @max chars from @s
- * @gfp: the GFP mask used in the kmalloc() call when allocating memory
- */
-char *kstrndup(const char *s, size_t max, gfp_t gfp)
-{
- size_t len;
- char *buf;
-
- if (!s)
- return NULL;
-
- len = strnlen(s, max);
- buf = kmalloc_track_caller(len+1, gfp);
- if (buf) {
- memcpy(buf, s, len);
- buf[len] = '\0';
- }
- return buf;
-}
-EXPORT_SYMBOL(kstrndup);
-
-/**
- * kmemdup - duplicate region of memory
- *
- * @src: memory region to duplicate
- * @len: memory region length
- * @gfp: GFP mask to use
- */
-void *kmemdup(const void *src, size_t len, gfp_t gfp)
-{
- void *p;
-
- p = kmalloc_track_caller(len, gfp);
- if (p)
- memcpy(p, src, len);
- return p;
-}
-EXPORT_SYMBOL(kmemdup);
-
-/**
- * __krealloc - like krealloc() but don't free @p.
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * This function is like krealloc() except it never frees the originally
- * allocated buffer. Use this if you don't want to free the buffer immediately
- * like, for example, with RCU.
- */
-void *__krealloc(const void *p, size_t new_size, gfp_t flags)
-{
- void *ret;
- size_t ks = 0;
-
- if (unlikely(!new_size))
- return ZERO_SIZE_PTR;
-
- if (p)
- ks = ksize(p);
-
- if (ks >= new_size)
- return (void *)p;
-
- ret = kmalloc_track_caller(new_size, flags);
- if (ret && p)
- memcpy(ret, p, ks);
-
- return ret;
-}
-EXPORT_SYMBOL(__krealloc);
-
-/**
- * krealloc - reallocate memory. The contents will remain unchanged.
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * The contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes. If @p is %NULL, krealloc()
- * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
- * %NULL pointer, the object pointed to is freed.
- */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
-{
- void *ret;
-
- if (unlikely(!new_size)) {
- kfree(p);
- return ZERO_SIZE_PTR;
- }
-
- ret = __krealloc(p, new_size, flags);
- if (ret && p != ret)
- kfree(p);
-
- return ret;
-}
-EXPORT_SYMBOL(krealloc);
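-
-/*
- * Illustrative sketch, not part of the original file: growing a buffer
- * with krealloc(). A temporary holds the result so the old buffer is not
- * leaked (or lost) when the reallocation fails.
- */
-#if 0
-static int example_grow(char **bufp, size_t new_size)
-{
-	char *tmp = krealloc(*bufp, new_size, GFP_KERNEL);
-
-	if (!tmp)
-		return -ENOMEM;	/* *bufp is still valid and unchanged */
-	*bufp = tmp;		/* old buffer was freed if it moved */
-	return 0;
-}
-#endif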
-
-/**
- * kzfree - like kfree but zero memory
- * @p: object to free memory of
- *
- * The memory of the object @p points to is zeroed before it is freed.
- * If @p is %NULL, kzfree() does nothing.
- */
-void kzfree(const void *p)
-{
- size_t ks;
- void *mem = (void *)p;
-
- if (unlikely(ZERO_OR_NULL_PTR(mem)))
- return;
- ks = ksize(mem);
- memset(mem, 0, ks);
- kfree(mem);
-}
-EXPORT_SYMBOL(kzfree);
-
-/*
- * strndup_user - duplicate an existing string from user space
- * @s: The string to duplicate
- * @n: Maximum number of bytes to copy, including the trailing NUL.
- */
-char *strndup_user(const char __user *s, long n)
-{
- char *p;
- long length;
-
- length = strnlen_user(s, n);
-
- if (!length)
- return ERR_PTR(-EFAULT);
-
- if (length > n)
- return ERR_PTR(-EINVAL);
-
- p = kmalloc(length, GFP_KERNEL);
-
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- if (copy_from_user(p, s, length)) {
- kfree(p);
- return ERR_PTR(-EFAULT);
- }
-
- p[length - 1] = '\0';
-
- return p;
-}
-EXPORT_SYMBOL(strndup_user);
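-
-/*
- * Illustrative sketch, not part of the original file: a syscall-style
- * entry point copying a user string with strndup_user() and unwrapping
- * the ERR_PTR convention documented above.
- */
-#if 0
-static long example_set_label(const char __user *ulabel)
-{
-	char *label = strndup_user(ulabel, PAGE_SIZE);
-
-	if (IS_ERR(label))
-		return PTR_ERR(label);	/* -EFAULT, -EINVAL or -ENOMEM */
-	/* ... use label ... */
-	kfree(label);
-	return 0;
-}
-#endif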
-
-#ifndef HAVE_ARCH_PICK_MMAP_LAYOUT
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
-}
-#endif
-
-int __attribute__((weak)) get_user_pages_fast(unsigned long start,
- int nr_pages, int write, struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- int ret;
-
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start, nr_pages,
- write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(get_user_pages_fast);