| author | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2013-07-27 22:07:53 +0000 |
|---|---|---|
| committer | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2013-07-27 22:07:53 +0000 |
| commit | 4fbe7358c7747a9165f776eb19addbb9baf7def2 | |
| tree | bc7076b4f6d10c2cc2942539bb666e50f0b66954 /libdde_linux26/lib/src/fs | |
| parent | 21adb5284111190057db245cfc2b54091920c373 | |
rename libdde_linux26 into libdde-linux26 to make dpkg-source happy
Diffstat (limited to 'libdde_linux26/lib/src/fs')
| -rw-r--r-- | libdde_linux26/lib/src/fs/block_dev.c | 1422 |
| -rw-r--r-- | libdde_linux26/lib/src/fs/buffer.c | 3474 |
| -rw-r--r-- | libdde_linux26/lib/src/fs/char_dev.c | 572 |
3 files changed, 0 insertions, 5468 deletions
diff --git a/libdde_linux26/lib/src/fs/block_dev.c b/libdde_linux26/lib/src/fs/block_dev.c deleted file mode 100644 index 4c4c2f64..00000000 --- a/libdde_linux26/lib/src/fs/block_dev.c +++ /dev/null @@ -1,1422 +0,0 @@ -/* - * linux/fs/block_dev.c - * - * Copyright (C) 1991, 1992 Linus Torvalds - * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE - */ - -#include <linux/init.h> -#include <linux/mm.h> -#include <linux/fcntl.h> -#include <linux/slab.h> -#include <linux/kmod.h> -#include <linux/major.h> -#include <linux/smp_lock.h> -#include <linux/device_cgroup.h> -#include <linux/highmem.h> -#include <linux/blkdev.h> -#include <linux/module.h> -#include <linux/blkpg.h> -#include <linux/buffer_head.h> -#include <linux/writeback.h> -#include <linux/mpage.h> -#include <linux/mount.h> -#include <linux/uio.h> -#include <linux/namei.h> -#include <linux/log2.h> -#include <asm/uaccess.h> -#include "internal.h" - -#ifdef DDE_LINUX -#include "local.h" -#endif - -struct bdev_inode { - struct block_device bdev; - struct inode vfs_inode; -}; - -static const struct address_space_operations def_blk_aops; - -static inline struct bdev_inode *BDEV_I(struct inode *inode) -{ - return container_of(inode, struct bdev_inode, vfs_inode); -} - -inline struct block_device *I_BDEV(struct inode *inode) -{ - return &BDEV_I(inode)->bdev; -} - -EXPORT_SYMBOL(I_BDEV); - -static sector_t max_block(struct block_device *bdev) -{ - sector_t retval = ~((sector_t)0); - loff_t sz = i_size_read(bdev->bd_inode); - - if (sz) { - unsigned int size = block_size(bdev); - unsigned int sizebits = blksize_bits(size); - retval = (sz >> sizebits); - } - return retval; -} - -/* Kill _all_ buffers and pagecache , dirty or not.. */ -static void kill_bdev(struct block_device *bdev) -{ - if (bdev->bd_inode->i_mapping->nrpages == 0) - return; - invalidate_bh_lrus(); - truncate_inode_pages(bdev->bd_inode->i_mapping, 0); -} - -int set_blocksize(struct block_device *bdev, int size) -{ - /* Size must be a power of two, and between 512 and PAGE_SIZE */ - if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size)) - return -EINVAL; - - /* Size cannot be smaller than the size supported by the device */ - if (size < bdev_hardsect_size(bdev)) - return -EINVAL; - - /* Don't change the size if it is same as current */ - if (bdev->bd_block_size != size) { - sync_blockdev(bdev); - bdev->bd_block_size = size; - bdev->bd_inode->i_blkbits = blksize_bits(size); - kill_bdev(bdev); - } - return 0; -} - -EXPORT_SYMBOL(set_blocksize); - -int sb_set_blocksize(struct super_block *sb, int size) -{ - if (set_blocksize(sb->s_bdev, size)) - return 0; - /* If we get here, we know size is power of two - * and it's value is between 512 and PAGE_SIZE */ - sb->s_blocksize = size; - sb->s_blocksize_bits = blksize_bits(size); - return sb->s_blocksize; -} - -EXPORT_SYMBOL(sb_set_blocksize); - -int sb_min_blocksize(struct super_block *sb, int size) -{ - int minsize = bdev_hardsect_size(sb->s_bdev); - if (size < minsize) - size = minsize; - return sb_set_blocksize(sb, size); -} - -EXPORT_SYMBOL(sb_min_blocksize); - -static int -blkdev_get_block(struct inode *inode, sector_t iblock, - struct buffer_head *bh, int create) -{ - if (iblock >= max_block(I_BDEV(inode))) { - if (create) - return -EIO; - - /* - * for reads, we're just trying to fill a partial page. 
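[Editorial note] The deleted set_blocksize() above enforces three constraints on a soft block size: it must be a power of two, lie within [512, PAGE_SIZE], and be no smaller than the device's hardware sector size. A minimal standalone sketch of that rule — an illustration, not part of the patch; PAGE_SIZE and the hardware sector size are assumed constants here:

```c
/* Sketch of the validation rule in set_blocksize() above.
 * PAGE_SIZE and HARDSECT_SIZE are assumed values. */
#include <stdio.h>

#define PAGE_SIZE     4096
#define HARDSECT_SIZE 512	/* assumed bdev_hardsect_size() result */

static int is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static int check_blocksize(int size)
{
	/* power of two, between 512 and PAGE_SIZE ... */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -1;		/* -EINVAL in the kernel */
	/* ... and no smaller than the hardware sector size */
	if (size < HARDSECT_SIZE)
		return -1;
	return 0;
}

int main(void)
{
	int sizes[] = { 256, 512, 1000, 1024, 4096, 8192 };

	for (int i = 0; i < 6; i++)
		printf("%5d -> %s\n", sizes[i],
		       check_blocksize(sizes[i]) ? "rejected" : "ok");
	return 0;
}
```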
- * return a hole, they will have to call get_block again - * before they can fill it, and they will get -EIO at that - * time - */ - return 0; - } - bh->b_bdev = I_BDEV(inode); - bh->b_blocknr = iblock; - set_buffer_mapped(bh); - return 0; -} - -static int -blkdev_get_blocks(struct inode *inode, sector_t iblock, - struct buffer_head *bh, int create) -{ - sector_t end_block = max_block(I_BDEV(inode)); - unsigned long max_blocks = bh->b_size >> inode->i_blkbits; - - if ((iblock + max_blocks) > end_block) { - max_blocks = end_block - iblock; - if ((long)max_blocks <= 0) { - if (create) - return -EIO; /* write fully beyond EOF */ - /* - * It is a read which is fully beyond EOF. We return - * a !buffer_mapped buffer - */ - max_blocks = 0; - } - } - - bh->b_bdev = I_BDEV(inode); - bh->b_blocknr = iblock; - bh->b_size = max_blocks << inode->i_blkbits; - if (max_blocks) - set_buffer_mapped(bh); - return 0; -} - -static ssize_t -blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, - loff_t offset, unsigned long nr_segs) -{ - struct file *file = iocb->ki_filp; - struct inode *inode = file->f_mapping->host; - -#ifndef DDE_LINUX - return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode), - iov, offset, nr_segs, blkdev_get_blocks, NULL); -#else - WARN_UNIMPL; - return 0; -#endif /* DDE_LINUX */ -} - -static int blkdev_writepage(struct page *page, struct writeback_control *wbc) -{ - return block_write_full_page(page, blkdev_get_block, wbc); -} - -static int blkdev_readpage(struct file * file, struct page * page) -{ - return block_read_full_page(page, blkdev_get_block); -} - -static int blkdev_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) -{ - *pagep = NULL; - return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, - blkdev_get_block); -} - -static int blkdev_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata) -{ - int ret; - ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); - - unlock_page(page); - page_cache_release(page); - - return ret; -} - -/* - * private llseek: - * for a block special file file->f_path.dentry->d_inode->i_size is zero - * so we compute the size by hand (just as in block_read/write above) - */ -static loff_t block_llseek(struct file *file, loff_t offset, int origin) -{ - struct inode *bd_inode = file->f_mapping->host; - loff_t size; - loff_t retval; - - mutex_lock(&bd_inode->i_mutex); - size = i_size_read(bd_inode); - - switch (origin) { - case 2: - offset += size; - break; - case 1: - offset += file->f_pos; - } - retval = -EINVAL; - if (offset >= 0 && offset <= size) { - if (offset != file->f_pos) { - file->f_pos = offset; - } - retval = offset; - } - mutex_unlock(&bd_inode->i_mutex); - return retval; -} - -/* - * Filp is never NULL; the only case when ->fsync() is called with - * NULL first argument is nfsd_sync_dir() and that's not a directory. 
- */ - -static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) -{ - return sync_blockdev(I_BDEV(filp->f_mapping->host)); -} - -/* - * pseudo-fs - */ - -static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); -static struct kmem_cache * bdev_cachep __read_mostly; - -static struct inode *bdev_alloc_inode(struct super_block *sb) -{ - struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL); - if (!ei) - return NULL; - return &ei->vfs_inode; -} - -static void bdev_destroy_inode(struct inode *inode) -{ - struct bdev_inode *bdi = BDEV_I(inode); - - bdi->bdev.bd_inode_backing_dev_info = NULL; - kmem_cache_free(bdev_cachep, bdi); -} - -static void init_once(void *foo) -{ - struct bdev_inode *ei = (struct bdev_inode *) foo; - struct block_device *bdev = &ei->bdev; - - memset(bdev, 0, sizeof(*bdev)); - mutex_init(&bdev->bd_mutex); - sema_init(&bdev->bd_mount_sem, 1); - INIT_LIST_HEAD(&bdev->bd_inodes); - INIT_LIST_HEAD(&bdev->bd_list); -#ifdef CONFIG_SYSFS - INIT_LIST_HEAD(&bdev->bd_holder_list); -#endif - inode_init_once(&ei->vfs_inode); - /* Initialize mutex for freeze. */ - mutex_init(&bdev->bd_fsfreeze_mutex); -} - -static inline void __bd_forget(struct inode *inode) -{ - list_del_init(&inode->i_devices); - inode->i_bdev = NULL; - inode->i_mapping = &inode->i_data; -} - -static void bdev_clear_inode(struct inode *inode) -{ - struct block_device *bdev = &BDEV_I(inode)->bdev; - struct list_head *p; - spin_lock(&bdev_lock); - while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) { - __bd_forget(list_entry(p, struct inode, i_devices)); - } - list_del_init(&bdev->bd_list); - spin_unlock(&bdev_lock); -} - -static const struct super_operations bdev_sops = { - .statfs = simple_statfs, - .alloc_inode = bdev_alloc_inode, - .destroy_inode = bdev_destroy_inode, - .drop_inode = generic_delete_inode, - .clear_inode = bdev_clear_inode, -}; - -static int bd_get_sb(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data, struct vfsmount *mnt) -{ - return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt); -} - -static struct file_system_type bd_type = { - .name = "bdev", - .get_sb = bd_get_sb, - .kill_sb = kill_anon_super, -}; - -struct super_block *blockdev_superblock __read_mostly; - -void __init bdev_cache_init(void) -{ - int err; - struct vfsmount *bd_mnt; - - bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), - 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD|SLAB_PANIC), - init_once); - err = register_filesystem(&bd_type); - if (err) - panic("Cannot register bdev pseudo-fs"); - bd_mnt = kern_mount(&bd_type); - if (IS_ERR(bd_mnt)) - panic("Cannot create bdev pseudo-fs"); - blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ -} - -/* - * Most likely _very_ bad one - but then it's hardly critical for small - * /dev and can be fixed when somebody will need really large one. - * Keep in mind that it will be fed through icache hash function too. 
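[Editorial note] The bdev pseudo-fs above allocates struct block_device and its VFS inode together in one struct bdev_inode, handing the VFS only &ei->vfs_inode; BDEV_I() and I_BDEV() recover the container via container_of(). A toy model of that embedding, with hypothetical cut-down types rather than the kernel's:

```c
/* Sketch of the container_of() embedding used by BDEV_I()/I_BDEV()
 * above: the container is recovered by subtracting the member offset
 * from the member pointer.  Toy types, not the kernel definitions. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inode { int i_mode; };
struct block_device { int bd_openers; };

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static struct block_device *I_BDEV(struct inode *inode)
{
	return &container_of(inode, struct bdev_inode, vfs_inode)->bdev;
}

int main(void)
{
	struct bdev_inode bi = { .bdev = { .bd_openers = 1 } };
	/* only the embedded inode pointer is passed around ... */
	struct inode *inode = &bi.vfs_inode;
	/* ... yet the enclosing block_device stays reachable */
	printf("bd_openers = %d\n", I_BDEV(inode)->bd_openers);
	return 0;
}
```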
- */ -static inline unsigned long hash(dev_t dev) -{ - return MAJOR(dev)+MINOR(dev); -} - -static int bdev_test(struct inode *inode, void *data) -{ - return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data; -} - -static int bdev_set(struct inode *inode, void *data) -{ - BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data; - return 0; -} - -static LIST_HEAD(all_bdevs); - -struct block_device *bdget(dev_t dev) -{ - struct block_device *bdev; - struct inode *inode; - - printk_all_partitions(); - - inode = iget5_locked(blockdev_superblock, hash(dev), - bdev_test, bdev_set, &dev); - - if (!inode) - return NULL; - - bdev = &BDEV_I(inode)->bdev; - - if (inode->i_state & I_NEW) { - bdev->bd_contains = NULL; - bdev->bd_inode = inode; - bdev->bd_block_size = (1 << inode->i_blkbits); - bdev->bd_part_count = 0; - bdev->bd_invalidated = 0; - inode->i_mode = S_IFBLK; - inode->i_rdev = dev; - inode->i_bdev = bdev; - inode->i_data.a_ops = &def_blk_aops; - mapping_set_gfp_mask(&inode->i_data, GFP_USER); - inode->i_data.backing_dev_info = &default_backing_dev_info; - spin_lock(&bdev_lock); - list_add(&bdev->bd_list, &all_bdevs); - spin_unlock(&bdev_lock); - unlock_new_inode(inode); - } - return bdev; -} - -EXPORT_SYMBOL(bdget); - -long nr_blockdev_pages(void) -{ - struct block_device *bdev; - long ret = 0; - spin_lock(&bdev_lock); - list_for_each_entry(bdev, &all_bdevs, bd_list) { - ret += bdev->bd_inode->i_mapping->nrpages; - } - spin_unlock(&bdev_lock); - return ret; -} - -void bdput(struct block_device *bdev) -{ - iput(bdev->bd_inode); -} - -EXPORT_SYMBOL(bdput); - -static struct block_device *bd_acquire(struct inode *inode) -{ - struct block_device *bdev; - - spin_lock(&bdev_lock); - bdev = inode->i_bdev; - if (bdev) { - atomic_inc(&bdev->bd_inode->i_count); - spin_unlock(&bdev_lock); - return bdev; - } - spin_unlock(&bdev_lock); - - bdev = bdget(inode->i_rdev); - if (bdev) { - spin_lock(&bdev_lock); - if (!inode->i_bdev) { - /* - * We take an additional bd_inode->i_count for inode, - * and it's released in clear_inode() of inode. - * So, we can access it via ->i_mapping always - * without igrab(). 
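[Editorial note] hash() above is flagged by its own comment as "most likely _very_ bad": adding major and minor numbers collides for any transposed pair. A small demonstration, assuming the 12/20-bit dev_t split used by kernels of this era:

```c
/* Sketch of why MAJOR(dev) + MINOR(dev) is a weak hash: devices
 * (8,1) and (1,8) land in the same bucket.  The 20-bit minor layout
 * is an assumption about contemporaneous kernels. */
#include <stdio.h>

#define MINORBITS 20
#define MAJOR(dev) ((unsigned)(dev) >> MINORBITS)
#define MINOR(dev) ((unsigned)(dev) & ((1u << MINORBITS) - 1))
#define MKDEV(ma, mi) (((unsigned)(ma) << MINORBITS) | (mi))

static unsigned long hash(unsigned dev)
{
	return MAJOR(dev) + MINOR(dev);
}

int main(void)
{
	unsigned a = MKDEV(8, 1), b = MKDEV(1, 8);

	printf("hash(8,1) = %lu, hash(1,8) = %lu\n", hash(a), hash(b));
	return 0;
}
```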
- */ - atomic_inc(&bdev->bd_inode->i_count); - inode->i_bdev = bdev; - inode->i_mapping = bdev->bd_inode->i_mapping; - list_add(&inode->i_devices, &bdev->bd_inodes); - } - spin_unlock(&bdev_lock); - } - return bdev; -} - -/* Call when you free inode */ - -void bd_forget(struct inode *inode) -{ - struct block_device *bdev = NULL; - - spin_lock(&bdev_lock); - if (inode->i_bdev) { - if (!sb_is_blkdev_sb(inode->i_sb)) - bdev = inode->i_bdev; - __bd_forget(inode); - } - spin_unlock(&bdev_lock); - - if (bdev) - iput(bdev->bd_inode); -} - -int bd_claim(struct block_device *bdev, void *holder) -{ - int res; - spin_lock(&bdev_lock); - - /* first decide result */ - if (bdev->bd_holder == holder) - res = 0; /* already a holder */ - else if (bdev->bd_holder != NULL) - res = -EBUSY; /* held by someone else */ - else if (bdev->bd_contains == bdev) - res = 0; /* is a whole device which isn't held */ - - else if (bdev->bd_contains->bd_holder == bd_claim) - res = 0; /* is a partition of a device that is being partitioned */ - else if (bdev->bd_contains->bd_holder != NULL) - res = -EBUSY; /* is a partition of a held device */ - else - res = 0; /* is a partition of an un-held device */ - - /* now impose change */ - if (res==0) { - /* note that for a whole device bd_holders - * will be incremented twice, and bd_holder will - * be set to bd_claim before being set to holder - */ - bdev->bd_contains->bd_holders ++; - bdev->bd_contains->bd_holder = bd_claim; - bdev->bd_holders++; - bdev->bd_holder = holder; - } - spin_unlock(&bdev_lock); - return res; -} - -EXPORT_SYMBOL(bd_claim); - -void bd_release(struct block_device *bdev) -{ - spin_lock(&bdev_lock); - if (!--bdev->bd_contains->bd_holders) - bdev->bd_contains->bd_holder = NULL; - if (!--bdev->bd_holders) - bdev->bd_holder = NULL; - spin_unlock(&bdev_lock); -} - -EXPORT_SYMBOL(bd_release); - -#ifdef CONFIG_SYSFS -/* - * Functions for bd_claim_by_kobject / bd_release_from_kobject - * - * If a kobject is passed to bd_claim_by_kobject() - * and the kobject has a parent directory, - * following symlinks are created: - * o from the kobject to the claimed bdev - * o from "holders" directory of the bdev to the parent of the kobject - * bd_release_from_kobject() removes these symlinks. - * - * Example: - * If /dev/dm-0 maps to /dev/sda, kobject corresponding to - * /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then: - * /sys/block/dm-0/slaves/sda --> /sys/block/sda - * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0 - */ - -static int add_symlink(struct kobject *from, struct kobject *to) -{ - if (!from || !to) - return 0; - return sysfs_create_link(from, to, kobject_name(to)); -} - -static void del_symlink(struct kobject *from, struct kobject *to) -{ - if (!from || !to) - return; - sysfs_remove_link(from, kobject_name(to)); -} - -/* - * 'struct bd_holder' contains pointers to kobjects symlinked by - * bd_claim_by_kobject. - * It's connected to bd_holder_list which is protected by bdev->bd_sem. - */ -struct bd_holder { - struct list_head list; /* chain of holders of the bdev */ - int count; /* references from the holder */ - struct kobject *sdir; /* holder object, e.g. "/block/dm-0/slaves" */ - struct kobject *hdev; /* e.g. "/block/dm-0" */ - struct kobject *hdir; /* e.g. "/block/sda/holders" */ - struct kobject *sdev; /* e.g. "/block/sda" */ -}; - -/* - * Get references of related kobjects at once. - * Returns 1 on success. 0 on failure. - * - * Should call bd_holder_release_dirs() after successful use. 
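[Editorial note] bd_claim() above works in two steps: first decide the result (same holder: ok; another holder: -EBUSY; for a partition, also check the whole device, where the address of bd_claim itself serves as a sentinel holder), then impose the change. A lock-free toy model of that decision table, illustrative only:

```c
/* Sketch of the bd_claim() decision above, reduced to plain
 * pointers.  No locking; toy model, not the kernel structure. */
#include <stdio.h>

struct bdev {
	struct bdev *bd_contains;	/* whole device (self if whole) */
	void *bd_holder;
	int bd_holders;
};

static int bd_claim(struct bdev *bdev, void *holder)
{
	void *sentinel = (void *)bd_claim;	/* same trick as the original */

	if (bdev->bd_holder && bdev->bd_holder != holder)
		return -1;		/* -EBUSY: held by someone else */
	if (bdev->bd_contains != bdev &&
	    bdev->bd_contains->bd_holder &&
	    bdev->bd_contains->bd_holder != sentinel)
		return -1;		/* partition of a held device */

	/* a whole device (bd_contains == bdev) is counted twice here,
	 * and its holder is set to the sentinel before the real holder */
	bdev->bd_contains->bd_holders++;
	bdev->bd_contains->bd_holder = sentinel;
	bdev->bd_holders++;
	bdev->bd_holder = holder;
	return 0;
}

int main(void)
{
	struct bdev whole = { &whole, NULL, 0 };
	struct bdev part = { &whole, NULL, 0 };
	int me;

	printf("claim part: %d\n", bd_claim(&part, &me));	/* 0 */
	printf("claim whole by another: %d\n", bd_claim(&whole, &part));
	return 0;
}
```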
- */ -static int bd_holder_grab_dirs(struct block_device *bdev, - struct bd_holder *bo) -{ - if (!bdev || !bo) - return 0; - - bo->sdir = kobject_get(bo->sdir); - if (!bo->sdir) - return 0; - - bo->hdev = kobject_get(bo->sdir->parent); - if (!bo->hdev) - goto fail_put_sdir; - - bo->sdev = kobject_get(&part_to_dev(bdev->bd_part)->kobj); - if (!bo->sdev) - goto fail_put_hdev; - - bo->hdir = kobject_get(bdev->bd_part->holder_dir); - if (!bo->hdir) - goto fail_put_sdev; - - return 1; - -fail_put_sdev: - kobject_put(bo->sdev); -fail_put_hdev: - kobject_put(bo->hdev); -fail_put_sdir: - kobject_put(bo->sdir); - - return 0; -} - -/* Put references of related kobjects at once. */ -static void bd_holder_release_dirs(struct bd_holder *bo) -{ - kobject_put(bo->hdir); - kobject_put(bo->sdev); - kobject_put(bo->hdev); - kobject_put(bo->sdir); -} - -static struct bd_holder *alloc_bd_holder(struct kobject *kobj) -{ - struct bd_holder *bo; - - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (!bo) - return NULL; - - bo->count = 1; - bo->sdir = kobj; - - return bo; -} - -static void free_bd_holder(struct bd_holder *bo) -{ - kfree(bo); -} - -/** - * find_bd_holder - find matching struct bd_holder from the block device - * - * @bdev: struct block device to be searched - * @bo: target struct bd_holder - * - * Returns matching entry with @bo in @bdev->bd_holder_list. - * If found, increment the reference count and return the pointer. - * If not found, returns NULL. - */ -static struct bd_holder *find_bd_holder(struct block_device *bdev, - struct bd_holder *bo) -{ - struct bd_holder *tmp; - - list_for_each_entry(tmp, &bdev->bd_holder_list, list) - if (tmp->sdir == bo->sdir) { - tmp->count++; - return tmp; - } - - return NULL; -} - -/** - * add_bd_holder - create sysfs symlinks for bd_claim() relationship - * - * @bdev: block device to be bd_claimed - * @bo: preallocated and initialized by alloc_bd_holder() - * - * Add @bo to @bdev->bd_holder_list, create symlinks. - * - * Returns 0 if symlinks are created. - * Returns -ve if something fails. - */ -static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo) -{ - int err; - - if (!bo) - return -EINVAL; - - if (!bd_holder_grab_dirs(bdev, bo)) - return -EBUSY; - - err = add_symlink(bo->sdir, bo->sdev); - if (err) - return err; - - err = add_symlink(bo->hdir, bo->hdev); - if (err) { - del_symlink(bo->sdir, bo->sdev); - return err; - } - - list_add_tail(&bo->list, &bdev->bd_holder_list); - return 0; -} - -/** - * del_bd_holder - delete sysfs symlinks for bd_claim() relationship - * - * @bdev: block device to be bd_claimed - * @kobj: holder's kobject - * - * If there is matching entry with @kobj in @bdev->bd_holder_list - * and no other bd_claim() from the same kobject, - * remove the struct bd_holder from the list, delete symlinks for it. - * - * Returns a pointer to the struct bd_holder when it's removed from the list - * and ready to be freed. - * Returns NULL if matching claim isn't found or there is other bd_claim() - * by the same kobject. 
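[Editorial note] bd_holder_grab_dirs() above is a textbook goto-unwind: each kobject_get() that fails releases, in reverse order, exactly the references already taken. A runnable miniature of the same shape; get/put here are hypothetical stand-ins for kobject_get/kobject_put:

```c
/* Sketch of the goto-unwind pattern in bd_holder_grab_dirs() above:
 * acquire in order, and on failure release only what was taken. */
#include <stdio.h>

static int get(const char *name, int ok)
{
	if (ok)
		printf("get %s\n", name);
	return ok;
}

static void put(const char *name)
{
	printf("put %s\n", name);
}

static int grab_dirs(int sdir_ok, int hdev_ok, int sdev_ok, int hdir_ok)
{
	if (!get("sdir", sdir_ok))
		return 0;
	if (!get("hdev", hdev_ok))
		goto fail_put_sdir;
	if (!get("sdev", sdev_ok))
		goto fail_put_hdev;
	if (!get("hdir", hdir_ok))
		goto fail_put_sdev;
	return 1;

fail_put_sdev:
	put("sdev");
fail_put_hdev:
	put("hdev");
fail_put_sdir:
	put("sdir");
	return 0;
}

int main(void)
{
	/* hdir acquisition fails: sdev, hdev, sdir are put in reverse */
	printf("grabbed: %d\n", grab_dirs(1, 1, 1, 0));
	return 0;
}
```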
- */ -static struct bd_holder *del_bd_holder(struct block_device *bdev, - struct kobject *kobj) -{ - struct bd_holder *bo; - - list_for_each_entry(bo, &bdev->bd_holder_list, list) { - if (bo->sdir == kobj) { - bo->count--; - BUG_ON(bo->count < 0); - if (!bo->count) { - list_del(&bo->list); - del_symlink(bo->sdir, bo->sdev); - del_symlink(bo->hdir, bo->hdev); - bd_holder_release_dirs(bo); - return bo; - } - break; - } - } - - return NULL; -} - -/** - * bd_claim_by_kobject - bd_claim() with additional kobject signature - * - * @bdev: block device to be claimed - * @holder: holder's signature - * @kobj: holder's kobject - * - * Do bd_claim() and if it succeeds, create sysfs symlinks between - * the bdev and the holder's kobject. - * Use bd_release_from_kobject() when relesing the claimed bdev. - * - * Returns 0 on success. (same as bd_claim()) - * Returns errno on failure. - */ -static int bd_claim_by_kobject(struct block_device *bdev, void *holder, - struct kobject *kobj) -{ - int err; - struct bd_holder *bo, *found; - - if (!kobj) - return -EINVAL; - - bo = alloc_bd_holder(kobj); - if (!bo) - return -ENOMEM; - - mutex_lock(&bdev->bd_mutex); - - err = bd_claim(bdev, holder); - if (err) - goto fail; - - found = find_bd_holder(bdev, bo); - if (found) - goto fail; - - err = add_bd_holder(bdev, bo); - if (err) - bd_release(bdev); - else - bo = NULL; -fail: - mutex_unlock(&bdev->bd_mutex); - free_bd_holder(bo); - return err; -} - -/** - * bd_release_from_kobject - bd_release() with additional kobject signature - * - * @bdev: block device to be released - * @kobj: holder's kobject - * - * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject(). - */ -static void bd_release_from_kobject(struct block_device *bdev, - struct kobject *kobj) -{ - if (!kobj) - return; - - mutex_lock(&bdev->bd_mutex); - bd_release(bdev); - free_bd_holder(del_bd_holder(bdev, kobj)); - mutex_unlock(&bdev->bd_mutex); -} - -/** - * bd_claim_by_disk - wrapper function for bd_claim_by_kobject() - * - * @bdev: block device to be claimed - * @holder: holder's signature - * @disk: holder's gendisk - * - * Call bd_claim_by_kobject() with getting @disk->slave_dir. - */ -int bd_claim_by_disk(struct block_device *bdev, void *holder, - struct gendisk *disk) -{ - return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir)); -} -EXPORT_SYMBOL_GPL(bd_claim_by_disk); - -/** - * bd_release_from_disk - wrapper function for bd_release_from_kobject() - * - * @bdev: block device to be claimed - * @disk: holder's gendisk - * - * Call bd_release_from_kobject() and put @disk->slave_dir. - */ -void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk) -{ - bd_release_from_kobject(bdev, disk->slave_dir); - kobject_put(disk->slave_dir); -} -EXPORT_SYMBOL_GPL(bd_release_from_disk); -#endif - -/* - * Tries to open block device by device number. Use it ONLY if you - * really do not have anything better - i.e. when you are behind a - * truly sucky interface and all you are given is a device number. _Never_ - * to be used for internal purposes. If you ever need it - reconsider - * your API. - */ -struct block_device *open_by_devnum(dev_t dev, fmode_t mode) -{ - struct block_device *bdev = bdget(dev); - int err = -ENOMEM; - if (bdev) - err = blkdev_get(bdev, mode); - return err ? ERR_PTR(err) : bdev; -} - -EXPORT_SYMBOL(open_by_devnum); - -/** - * flush_disk - invalidates all buffer-cache entries on a disk - * - * @bdev: struct block device to be flushed - * - * Invalidates all buffer-cache entries on a disk. 
It should be called - * when a disk has been changed -- either by a media change or online - * resize. - */ -static void flush_disk(struct block_device *bdev) -{ - if (__invalidate_device(bdev)) { - char name[BDEVNAME_SIZE] = ""; - - if (bdev->bd_disk) - disk_name(bdev->bd_disk, 0, name); - printk(KERN_WARNING "VFS: busy inodes on changed media or " - "resized disk %s\n", name); - } - - if (!bdev->bd_disk) - return; - if (disk_partitionable(bdev->bd_disk)) - bdev->bd_invalidated = 1; -} - -/** - * check_disk_size_change - checks for disk size change and adjusts bdev size. - * @disk: struct gendisk to check - * @bdev: struct bdev to adjust. - * - * This routine checks to see if the bdev size does not match the disk size - * and adjusts it if it differs. - */ -void check_disk_size_change(struct gendisk *disk, struct block_device *bdev) -{ - loff_t disk_size, bdev_size; - - disk_size = (loff_t)get_capacity(disk) << 9; - bdev_size = i_size_read(bdev->bd_inode); - if (disk_size != bdev_size) { - char name[BDEVNAME_SIZE]; - - disk_name(disk, 0, name); - printk(KERN_INFO - "%s: detected capacity change from %lld to %lld\n", - name, bdev_size, disk_size); - i_size_write(bdev->bd_inode, disk_size); - flush_disk(bdev); - } -} -EXPORT_SYMBOL(check_disk_size_change); - -/** - * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back - * @disk: struct gendisk to be revalidated - * - * This routine is a wrapper for lower-level driver's revalidate_disk - * call-backs. It is used to do common pre and post operations needed - * for all revalidate_disk operations. - */ -int revalidate_disk(struct gendisk *disk) -{ - struct block_device *bdev; - int ret = 0; - - if (disk->fops->revalidate_disk) - ret = disk->fops->revalidate_disk(disk); - - bdev = bdget_disk(disk, 0); - if (!bdev) - return ret; - - mutex_lock(&bdev->bd_mutex); - check_disk_size_change(disk, bdev); - mutex_unlock(&bdev->bd_mutex); - bdput(bdev); - return ret; -} -EXPORT_SYMBOL(revalidate_disk); - -/* - * This routine checks whether a removable media has been changed, - * and invalidates all buffer-cache-entries in that case. This - * is a relatively slow routine, so we have to try to minimize using - * it. Thus it is called only upon a 'mount' or 'open'. This - * is the best way of combining speed and utility, I think. 
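[Editorial note] check_disk_size_change() above compares the driver-reported capacity against the cached inode size; capacity is counted in 512-byte sectors, hence the << 9, and the widening cast must happen before the shift to survive disks larger than 32-bit sector math allows. A sketch with made-up numbers:

```c
/* Sketch of the size comparison in check_disk_size_change() above.
 * Capacity and cached size values are invented for illustration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t capacity = 10000000ULL;	/* sectors, as from get_capacity() */
	int64_t disk_size = (int64_t)capacity << 9;	/* cast before shift */
	int64_t bdev_size = 4096000000LL;	/* stale i_size of the bdev inode */

	if (disk_size != bdev_size)
		printf("detected capacity change from %lld to %lld\n",
		       (long long)bdev_size, (long long)disk_size);
	return 0;
}
```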
- * People changing diskettes in the middle of an operation deserve - * to lose :-) - */ -int check_disk_change(struct block_device *bdev) -{ - struct gendisk *disk = bdev->bd_disk; - struct block_device_operations * bdops = disk->fops; - - if (!bdops->media_changed) - return 0; - if (!bdops->media_changed(bdev->bd_disk)) - return 0; - - flush_disk(bdev); - if (bdops->revalidate_disk) - bdops->revalidate_disk(bdev->bd_disk); - return 1; -} - -EXPORT_SYMBOL(check_disk_change); - -void bd_set_size(struct block_device *bdev, loff_t size) -{ - unsigned bsize = bdev_hardsect_size(bdev); - - bdev->bd_inode->i_size = size; - while (bsize < PAGE_CACHE_SIZE) { - if (size & bsize) - break; - bsize <<= 1; - } - bdev->bd_block_size = bsize; - bdev->bd_inode->i_blkbits = blksize_bits(bsize); -} -EXPORT_SYMBOL(bd_set_size); - -static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part); - -/* - * bd_mutex locking: - * - * mutex_lock(part->bd_mutex) - * mutex_lock_nested(whole->bd_mutex, 1) - */ - -static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) -{ - struct gendisk *disk; - int ret; - int partno; - int perm = 0; - - if (mode & FMODE_READ) - perm |= MAY_READ; - if (mode & FMODE_WRITE) - perm |= MAY_WRITE; - /* - * hooks: /n/, see "layering violations". - */ - ret = devcgroup_inode_permission(bdev->bd_inode, perm); - if (ret != 0) { - bdput(bdev); - return ret; - } - - lock_kernel(); - restart: - - ret = -ENXIO; - disk = get_gendisk(bdev->bd_dev, &partno); - if (!disk) - goto out_unlock_kernel; - - mutex_lock_nested(&bdev->bd_mutex, for_part); - if (!bdev->bd_openers) { - bdev->bd_disk = disk; - bdev->bd_contains = bdev; - if (!partno) { - struct backing_dev_info *bdi; - - ret = -ENXIO; - bdev->bd_part = disk_get_part(disk, partno); - if (!bdev->bd_part) - goto out_clear; - - if (disk->fops->open) { - ret = disk->fops->open(bdev, mode); - if (ret == -ERESTARTSYS) { - /* Lost a race with 'disk' being - * deleted, try again. 
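[Editorial note] bd_set_size() above derives the soft block size by doubling up from the hardware sector size while the device size remains an exact multiple, capping at the page size. The loop in isolation, assuming a 4096-byte page:

```c
/* Sketch of the block-size selection loop in bd_set_size() above.
 * PAGE_CACHE_SIZE is an assumed constant. */
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096

static unsigned pick_block_size(unsigned long long size, unsigned hardsect)
{
	unsigned bsize = hardsect;

	while (bsize < PAGE_CACHE_SIZE) {
		if (size & bsize)	/* size is an odd multiple of bsize */
			break;
		bsize <<= 1;
	}
	return bsize;
}

int main(void)
{
	/* 1 GiB device: the block size climbs to the 4096 cap */
	printf("%u\n", pick_block_size(1ULL << 30, 512));
	/* 512 * 1954 bytes: one doubling to 1024; 977 blocks is odd, stop */
	printf("%u\n", pick_block_size(512ULL * 1954, 512));
	return 0;
}
```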
- * See md.c - */ - disk_put_part(bdev->bd_part); - bdev->bd_part = NULL; - module_put(disk->fops->owner); - put_disk(disk); - bdev->bd_disk = NULL; - mutex_unlock(&bdev->bd_mutex); - goto restart; - } - if (ret) - goto out_clear; - } - if (!bdev->bd_openers) { - bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); - bdi = blk_get_backing_dev_info(bdev); - if (bdi == NULL) - bdi = &default_backing_dev_info; - bdev->bd_inode->i_data.backing_dev_info = bdi; - } - if (bdev->bd_invalidated) - rescan_partitions(disk, bdev); - } else { - struct block_device *whole; - whole = bdget_disk(disk, 0); - ret = -ENOMEM; - if (!whole) - goto out_clear; - BUG_ON(for_part); - ret = __blkdev_get(whole, mode, 1); - if (ret) - goto out_clear; - bdev->bd_contains = whole; - bdev->bd_inode->i_data.backing_dev_info = - whole->bd_inode->i_data.backing_dev_info; - bdev->bd_part = disk_get_part(disk, partno); - if (!(disk->flags & GENHD_FL_UP) || - !bdev->bd_part || !bdev->bd_part->nr_sects) { - ret = -ENXIO; - goto out_clear; - } - bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); - } - } else { - put_disk(disk); - module_put(disk->fops->owner); - disk = NULL; - if (bdev->bd_contains == bdev) { - if (bdev->bd_disk->fops->open) { - ret = bdev->bd_disk->fops->open(bdev, mode); - if (ret) - goto out_unlock_bdev; - } - if (bdev->bd_invalidated) - rescan_partitions(bdev->bd_disk, bdev); - } - } - bdev->bd_openers++; - if (for_part) - bdev->bd_part_count++; - mutex_unlock(&bdev->bd_mutex); - unlock_kernel(); - return 0; - - out_clear: - disk_put_part(bdev->bd_part); - bdev->bd_disk = NULL; - bdev->bd_part = NULL; - bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; - if (bdev != bdev->bd_contains) - __blkdev_put(bdev->bd_contains, mode, 1); - bdev->bd_contains = NULL; - out_unlock_bdev: - mutex_unlock(&bdev->bd_mutex); - out_unlock_kernel: - unlock_kernel(); - - if (disk) - module_put(disk->fops->owner); - put_disk(disk); - bdput(bdev); - - return ret; -} - -int blkdev_get(struct block_device *bdev, fmode_t mode) -{ - return __blkdev_get(bdev, mode, 0); -} -EXPORT_SYMBOL(blkdev_get); - -static int blkdev_open(struct inode * inode, struct file * filp) -{ - struct block_device *bdev; - int res; - - /* - * Preserve backwards compatibility and allow large file access - * even if userspace doesn't ask for it explicitly. Some mkfs - * binary needs it. We might want to drop this workaround - * during an unstable branch. 
- */ - filp->f_flags |= O_LARGEFILE; - - if (filp->f_flags & O_NDELAY) - filp->f_mode |= FMODE_NDELAY; - if (filp->f_flags & O_EXCL) - filp->f_mode |= FMODE_EXCL; - if ((filp->f_flags & O_ACCMODE) == 3) - filp->f_mode |= FMODE_WRITE_IOCTL; - - bdev = bd_acquire(inode); - if (bdev == NULL) - return -ENOMEM; - - filp->f_mapping = bdev->bd_inode->i_mapping; - - res = blkdev_get(bdev, filp->f_mode); - if (res) - return res; - - if (filp->f_mode & FMODE_EXCL) { - res = bd_claim(bdev, filp); - if (res) - goto out_blkdev_put; - } - - return 0; - - out_blkdev_put: - blkdev_put(bdev, filp->f_mode); - return res; -} - -static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) -{ - int ret = 0; - struct gendisk *disk = bdev->bd_disk; - struct block_device *victim = NULL; - - mutex_lock_nested(&bdev->bd_mutex, for_part); - lock_kernel(); - if (for_part) - bdev->bd_part_count--; - - if (!--bdev->bd_openers) { - sync_blockdev(bdev); - kill_bdev(bdev); - } - if (bdev->bd_contains == bdev) { - if (disk->fops->release) - ret = disk->fops->release(disk, mode); - } - if (!bdev->bd_openers) { - struct module *owner = disk->fops->owner; - - put_disk(disk); - module_put(owner); - disk_put_part(bdev->bd_part); - bdev->bd_part = NULL; - bdev->bd_disk = NULL; - bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; - if (bdev != bdev->bd_contains) - victim = bdev->bd_contains; - bdev->bd_contains = NULL; - } - unlock_kernel(); - mutex_unlock(&bdev->bd_mutex); - bdput(bdev); - if (victim) - __blkdev_put(victim, mode, 1); - return ret; -} - -int blkdev_put(struct block_device *bdev, fmode_t mode) -{ - return __blkdev_put(bdev, mode, 0); -} -EXPORT_SYMBOL(blkdev_put); - -static int blkdev_close(struct inode * inode, struct file * filp) -{ - struct block_device *bdev = I_BDEV(filp->f_mapping->host); - if (bdev->bd_holder == filp) - bd_release(bdev); - return blkdev_put(bdev, filp->f_mode); -} - -static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg) -{ - struct block_device *bdev = I_BDEV(file->f_mapping->host); - fmode_t mode = file->f_mode; - - /* - * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have - * to updated it before every ioctl. - */ - if (file->f_flags & O_NDELAY) - mode |= FMODE_NDELAY; - else - mode &= ~FMODE_NDELAY; - - return blkdev_ioctl(bdev, mode, cmd, arg); -} - -/* - * Try to release a page associated with block device when the system - * is under memory pressure. 
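[Editorial note] blkdev_open() above maps open(2) flags onto fmode bits, with one oddity: an access mode of 3 (both O_ACCMODE bits set) grants FMODE_WRITE_IOCTL — no data access on the fd itself, but write-capable ioctls. A sketch of the translation; the flag values below are illustrative, not the kernel's:

```c
/* Sketch of the flag translation at the top of blkdev_open() above.
 * All constants are assumed for illustration. */
#include <stdio.h>

#define O_ACCMODE 3
#define O_NDELAY  04000
#define O_EXCL    0200

#define FMODE_NDELAY      0x01
#define FMODE_EXCL        0x02
#define FMODE_WRITE_IOCTL 0x04

static unsigned blkdev_open_fmode(unsigned f_flags)
{
	unsigned f_mode = 0;

	if (f_flags & O_NDELAY)
		f_mode |= FMODE_NDELAY;
	if (f_flags & O_EXCL)
		f_mode |= FMODE_EXCL;
	if ((f_flags & O_ACCMODE) == 3)	/* neither read nor write mode */
		f_mode |= FMODE_WRITE_IOCTL;
	return f_mode;
}

int main(void)
{
	printf("accmode 3      -> fmode %#x\n", blkdev_open_fmode(3));
	printf("O_EXCL, O_RDWR -> fmode %#x\n", blkdev_open_fmode(O_EXCL | 2));
	return 0;
}
```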
- */ -static int blkdev_releasepage(struct page *page, gfp_t wait) -{ - struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super; - - if (super && super->s_op->bdev_try_to_free_page) - return super->s_op->bdev_try_to_free_page(super, page, wait); - - return try_to_free_buffers(page); -} - -static const struct address_space_operations def_blk_aops = { - .readpage = blkdev_readpage, - .writepage = blkdev_writepage, - .sync_page = block_sync_page, - .write_begin = blkdev_write_begin, - .write_end = blkdev_write_end, - .writepages = generic_writepages, - .releasepage = blkdev_releasepage, - .direct_IO = blkdev_direct_IO, -}; - -const struct file_operations def_blk_fops = { - .open = blkdev_open, - .release = blkdev_close, -#ifndef DDE_LINUX - .llseek = block_llseek, - .read = do_sync_read, - .write = do_sync_write, - .aio_read = generic_file_aio_read, - .aio_write = generic_file_aio_write_nolock, - .mmap = generic_file_mmap, - .fsync = block_fsync, - .unlocked_ioctl = block_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = compat_blkdev_ioctl, -#endif - .splice_read = generic_file_splice_read, - .splice_write = generic_file_splice_write, -#endif /* DDE_LINUX */ -}; - -int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg) -{ - int res; - mm_segment_t old_fs = get_fs(); - set_fs(KERNEL_DS); - res = blkdev_ioctl(bdev, 0, cmd, arg); - set_fs(old_fs); - return res; -} - -EXPORT_SYMBOL(ioctl_by_bdev); - -/** - * lookup_bdev - lookup a struct block_device by name - * @pathname: special file representing the block device - * - * Get a reference to the blockdevice at @pathname in the current - * namespace if possible and return it. Return ERR_PTR(error) - * otherwise. - */ -struct block_device *lookup_bdev(const char *pathname) -{ - struct block_device *bdev; - struct inode *inode; - struct path path; - int error; - - if (!pathname || !*pathname) - return ERR_PTR(-EINVAL); - - error = kern_path(pathname, LOOKUP_FOLLOW, &path); - if (error) - return ERR_PTR(error); - - inode = path.dentry->d_inode; - error = -ENOTBLK; - if (!S_ISBLK(inode->i_mode)) - goto fail; - error = -EACCES; - if (path.mnt->mnt_flags & MNT_NODEV) - goto fail; - error = -ENOMEM; - bdev = bd_acquire(inode); - if (!bdev) - goto fail; -out: - path_put(&path); - return bdev; -fail: - bdev = ERR_PTR(error); - goto out; -} -EXPORT_SYMBOL(lookup_bdev); - -/** - * open_bdev_exclusive - open a block device by name and set it up for use - * - * @path: special file representing the block device - * @mode: FMODE_... combination to pass be used - * @holder: owner for exclusion - * - * Open the blockdevice described by the special file at @path, claim it - * for the @holder. - */ -struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder) -{ - struct block_device *bdev; - int error = 0; - - bdev = lookup_bdev(path); - if (IS_ERR(bdev)) - return bdev; - - error = blkdev_get(bdev, mode); - if (error) - return ERR_PTR(error); - error = -EACCES; - if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) - goto blkdev_put; - error = bd_claim(bdev, holder); - if (error) - goto blkdev_put; - - return bdev; - -blkdev_put: - blkdev_put(bdev, mode); - return ERR_PTR(error); -} - -EXPORT_SYMBOL(open_bdev_exclusive); - -/** - * close_bdev_exclusive - close a blockdevice opened by open_bdev_exclusive() - * - * @bdev: blockdevice to close - * @mode: mode, must match that used to open. - * - * This is the counterpart to open_bdev_exclusive(). 
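[Editorial note] open_bdev_exclusive() above layers lookup, open, a read-only check, and the exclusive claim; every late failure must undo the blkdev_get() that already succeeded. A toy of that unwind, where all calls are fake stand-ins for the kernel functions:

```c
/* Sketch of the open/claim/unwind sequence in open_bdev_exclusive()
 * above.  Device state is faked with flags; no real I/O. */
#include <stdio.h>

struct bdev { int read_only; int held; int open; };

static int blkdev_get(struct bdev *b)  { b->open = 1; return 0; }
static void blkdev_put(struct bdev *b) { b->open = 0; }
static int bd_claim(struct bdev *b)    { return b->held ? -1 : (b->held = 1, 0); }

static int open_exclusive(struct bdev *b, int want_write)
{
	if (blkdev_get(b))
		return -1;
	if (want_write && b->read_only)
		goto out_put;		/* -EACCES in the original */
	if (bd_claim(b))
		goto out_put;		/* claim failed: undo the get */
	return 0;

out_put:
	blkdev_put(b);
	return -1;
}

int main(void)
{
	struct bdev ro = { .read_only = 1 };

	printf("write-open of read-only dev: %d (open=%d)\n",
	       open_exclusive(&ro, 1), ro.open);
	return 0;
}
```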
- */ -void close_bdev_exclusive(struct block_device *bdev, fmode_t mode) -{ - bd_release(bdev); - blkdev_put(bdev, mode); -} - -EXPORT_SYMBOL(close_bdev_exclusive); - -int __invalidate_device(struct block_device *bdev) -{ - struct super_block *sb = get_super(bdev); - int res = 0; - - if (sb) { - /* - * no need to lock the super, get_super holds the - * read mutex so the filesystem cannot go away - * under us (->put_super runs with the write lock - * hold). - */ - shrink_dcache_sb(sb); - res = invalidate_inodes(sb); - drop_super(sb); - } - invalidate_bdev(bdev); - return res; -} -EXPORT_SYMBOL(__invalidate_device); diff --git a/libdde_linux26/lib/src/fs/buffer.c b/libdde_linux26/lib/src/fs/buffer.c deleted file mode 100644 index d3b1c445..00000000 --- a/libdde_linux26/lib/src/fs/buffer.c +++ /dev/null @@ -1,3474 +0,0 @@ -/* - * linux/fs/buffer.c - * - * Copyright (C) 1991, 1992, 2002 Linus Torvalds - */ - -/* - * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95 - * - * Removed a lot of unnecessary code and simplified things now that - * the buffer cache isn't our primary cache - Andrew Tridgell 12/96 - * - * Speed up hash, lru, and free list operations. Use gfp() for allocating - * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM - * - * Added 32k buffer block sizes - these are required older ARM systems. - RMK - * - * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de> - */ - -#include <linux/kernel.h> -#include <linux/syscalls.h> -#include <linux/fs.h> -#include <linux/mm.h> -#include <linux/percpu.h> -#include <linux/slab.h> -#include <linux/capability.h> -#include <linux/blkdev.h> -#include <linux/file.h> -#include <linux/quotaops.h> -#include <linux/highmem.h> -#include <linux/module.h> -#include <linux/writeback.h> -#include <linux/hash.h> -#include <linux/suspend.h> -#include <linux/buffer_head.h> -#include <linux/task_io_accounting_ops.h> -#include <linux/bio.h> -#include <linux/notifier.h> -#include <linux/cpu.h> -#include <linux/bitops.h> -#include <linux/mpage.h> -#include <linux/bit_spinlock.h> - -static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); - -#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers) - -inline void -init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private) -{ - bh->b_end_io = handler; - bh->b_private = private; -} - -static int sync_buffer(void *word) -{ - struct block_device *bd; - struct buffer_head *bh - = container_of(word, struct buffer_head, b_state); - - smp_mb(); - bd = bh->b_bdev; - if (bd) - blk_run_address_space(bd->bd_inode->i_mapping); - io_schedule(); - return 0; -} - -void __lock_buffer(struct buffer_head *bh) -{ - wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer, - TASK_UNINTERRUPTIBLE); -} -EXPORT_SYMBOL(__lock_buffer); - -void unlock_buffer(struct buffer_head *bh) -{ - clear_bit_unlock(BH_Lock, &bh->b_state); - smp_mb__after_clear_bit(); - wake_up_bit(&bh->b_state, BH_Lock); -} - -/* - * Block until a buffer comes unlocked. This doesn't stop it - * from becoming locked again - you have to lock it yourself - * if you want to preserve its state. 
- */ -void __wait_on_buffer(struct buffer_head * bh) -{ - wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE); -} - -static void -__clear_page_buffers(struct page *page) -{ - ClearPagePrivate(page); - set_page_private(page, 0); - page_cache_release(page); -} - - -static int quiet_error(struct buffer_head *bh) -{ - if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit()) - return 0; - return 1; -} - - -static void buffer_io_error(struct buffer_head *bh) -{ - char b[BDEVNAME_SIZE]; - printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n", - bdevname(bh->b_bdev, b), - (unsigned long long)bh->b_blocknr); -} - -/* - * End-of-IO handler helper function which does not touch the bh after - * unlocking it. - * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but - * a race there is benign: unlock_buffer() only use the bh's address for - * hashing after unlocking the buffer, so it doesn't actually touch the bh - * itself. - */ -static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) -{ - if (uptodate) { - set_buffer_uptodate(bh); - } else { - /* This happens, due to failed READA attempts. */ - clear_buffer_uptodate(bh); - } - unlock_buffer(bh); -} - -/* - * Default synchronous end-of-IO handler.. Just mark it up-to-date and - * unlock the buffer. This is what ll_rw_block uses too. - */ -void end_buffer_read_sync(struct buffer_head *bh, int uptodate) -{ - __end_buffer_read_notouch(bh, uptodate); - put_bh(bh); -} - -void end_buffer_write_sync(struct buffer_head *bh, int uptodate) -{ - char b[BDEVNAME_SIZE]; - - if (uptodate) { - set_buffer_uptodate(bh); - } else { - if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) { - buffer_io_error(bh); - printk(KERN_WARNING "lost page write due to " - "I/O error on %s\n", - bdevname(bh->b_bdev, b)); - } - set_buffer_write_io_error(bh); - clear_buffer_uptodate(bh); - } - unlock_buffer(bh); - put_bh(bh); -} - -/* - * Write out and wait upon all the dirty data associated with a block - * device via its mapping. Does not take the superblock lock. - */ -int sync_blockdev(struct block_device *bdev) -{ -#ifndef DDE_LINUX - int ret = 0; - - if (bdev) - ret = filemap_write_and_wait(bdev->bd_inode->i_mapping); - return ret; -#else - WARN_UNIMPL; - return 0; -#endif /* DDE_LINUX */ -} -EXPORT_SYMBOL(sync_blockdev); - -/* - * Write out and wait upon all dirty data associated with this - * device. Filesystem data as well as the underlying block - * device. Takes the superblock lock. - */ -int fsync_bdev(struct block_device *bdev) -{ -#ifndef DDE_LINUX - struct super_block *sb = get_super(bdev); - if (sb) { - int res = fsync_super(sb); - drop_super(sb); - return res; - } - return sync_blockdev(bdev); -#else - WARN_UNIMPL; - return -1; -#endif -} - -/** - * freeze_bdev -- lock a filesystem and force it into a consistent state - * @bdev: blockdevice to lock - * - * This takes the block device bd_mount_sem to make sure no new mounts - * happen on bdev until thaw_bdev() is called. - * If a superblock is found on this device, we take the s_umount semaphore - * on it to make sure nobody unmounts until the snapshot creation is done. - * The reference counter (bd_fsfreeze_count) guarantees that only the last - * unfreeze process can unfreeze the frozen filesystem actually when multiple - * freeze requests arrive simultaneously. It counts up in freeze_bdev() and - * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze - * actually. 
- */ -struct super_block *freeze_bdev(struct block_device *bdev) -{ - struct super_block *sb; - int error = 0; - - mutex_lock(&bdev->bd_fsfreeze_mutex); - if (bdev->bd_fsfreeze_count > 0) { - bdev->bd_fsfreeze_count++; - sb = get_super(bdev); - mutex_unlock(&bdev->bd_fsfreeze_mutex); - return sb; - } - bdev->bd_fsfreeze_count++; - - down(&bdev->bd_mount_sem); - sb = get_super(bdev); - if (sb && !(sb->s_flags & MS_RDONLY)) { - sb->s_frozen = SB_FREEZE_WRITE; - smp_wmb(); - - __fsync_super(sb); - - sb->s_frozen = SB_FREEZE_TRANS; - smp_wmb(); - - sync_blockdev(sb->s_bdev); - - if (sb->s_op->freeze_fs) { - error = sb->s_op->freeze_fs(sb); - if (error) { - printk(KERN_ERR - "VFS:Filesystem freeze failed\n"); - sb->s_frozen = SB_UNFROZEN; - drop_super(sb); - up(&bdev->bd_mount_sem); - bdev->bd_fsfreeze_count--; - mutex_unlock(&bdev->bd_fsfreeze_mutex); - return ERR_PTR(error); - } - } - } - - sync_blockdev(bdev); - mutex_unlock(&bdev->bd_fsfreeze_mutex); - - return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */ -} -EXPORT_SYMBOL(freeze_bdev); - -/** - * thaw_bdev -- unlock filesystem - * @bdev: blockdevice to unlock - * @sb: associated superblock - * - * Unlocks the filesystem and marks it writeable again after freeze_bdev(). - */ -int thaw_bdev(struct block_device *bdev, struct super_block *sb) -{ - int error = 0; - - mutex_lock(&bdev->bd_fsfreeze_mutex); - if (!bdev->bd_fsfreeze_count) { - mutex_unlock(&bdev->bd_fsfreeze_mutex); - return -EINVAL; - } - - bdev->bd_fsfreeze_count--; - if (bdev->bd_fsfreeze_count > 0) { - if (sb) - drop_super(sb); - mutex_unlock(&bdev->bd_fsfreeze_mutex); - return 0; - } - - if (sb) { - BUG_ON(sb->s_bdev != bdev); - if (!(sb->s_flags & MS_RDONLY)) { - if (sb->s_op->unfreeze_fs) { - error = sb->s_op->unfreeze_fs(sb); - if (error) { - printk(KERN_ERR - "VFS:Filesystem thaw failed\n"); - sb->s_frozen = SB_FREEZE_TRANS; - bdev->bd_fsfreeze_count++; - mutex_unlock(&bdev->bd_fsfreeze_mutex); - return error; - } - } - sb->s_frozen = SB_UNFROZEN; - smp_wmb(); - wake_up(&sb->s_wait_unfrozen); - } - drop_super(sb); - } - - up(&bdev->bd_mount_sem); - mutex_unlock(&bdev->bd_fsfreeze_mutex); - return 0; -} -EXPORT_SYMBOL(thaw_bdev); - -/* - * Various filesystems appear to want __find_get_block to be non-blocking. - * But it's the page lock which protects the buffers. To get around this, - * we get exclusion from try_to_free_buffers with the blockdev mapping's - * private_lock. - * - * Hack idea: for the blockdev mapping, i_bufferlist_lock contention - * may be quite high. This code could TryLock the page, and if that - * succeeds, there is no need to take private_lock. (But if - * private_lock is contended then so is mapping->tree_lock). 
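[Editorial note] freeze_bdev()/thaw_bdev() above implement nested freezing through bd_fsfreeze_count: only the first freeze does the real work, and only the thaw that returns the count to zero unfreezes. The counting protocol in miniature — single-threaded here, whereas the real code serializes with bd_fsfreeze_mutex:

```c
/* Sketch of the bd_fsfreeze_count protocol described above. */
#include <stdio.h>

static int fsfreeze_count;
static int frozen;

static void freeze(void)
{
	if (fsfreeze_count++ == 0)
		frozen = 1;	/* first freezer does the real work */
}

static int thaw(void)
{
	if (!fsfreeze_count)
		return -1;	/* -EINVAL: not frozen */
	if (--fsfreeze_count == 0)
		frozen = 0;	/* last thaw actually unfreezes */
	return 0;
}

int main(void)
{
	freeze();
	freeze();		/* nested freeze: count only */
	thaw();
	printf("after one thaw: frozen=%d\n", frozen);	/* still 1 */
	thaw();
	printf("after last thaw: frozen=%d\n", frozen);	/* 0 */
	return 0;
}
```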
- */ -static struct buffer_head * -__find_get_block_slow(struct block_device *bdev, sector_t block) -{ - struct inode *bd_inode = bdev->bd_inode; - struct address_space *bd_mapping = bd_inode->i_mapping; - struct buffer_head *ret = NULL; - pgoff_t index; - struct buffer_head *bh; - struct buffer_head *head; - struct page *page; - int all_mapped = 1; - - index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits); - page = find_get_page(bd_mapping, index); - if (!page) - goto out; - - spin_lock(&bd_mapping->private_lock); - if (!page_has_buffers(page)) - goto out_unlock; - head = page_buffers(page); - bh = head; - do { - if (bh->b_blocknr == block) { - ret = bh; - get_bh(bh); - goto out_unlock; - } - if (!buffer_mapped(bh)) - all_mapped = 0; - bh = bh->b_this_page; - } while (bh != head); - - /* we might be here because some of the buffers on this page are - * not mapped. This is due to various races between - * file io on the block device and getblk. It gets dealt with - * elsewhere, don't buffer_error if we had some unmapped buffers - */ - if (all_mapped) { - printk("__find_get_block_slow() failed. " - "block=%llu, b_blocknr=%llu\n", - (unsigned long long)block, - (unsigned long long)bh->b_blocknr); - printk("b_state=0x%08lx, b_size=%zu\n", - bh->b_state, bh->b_size); - printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits); - } -out_unlock: - spin_unlock(&bd_mapping->private_lock); - page_cache_release(page); -out: - return ret; -} - -/* If invalidate_buffers() will trash dirty buffers, it means some kind - of fs corruption is going on. Trashing dirty data always imply losing - information that was supposed to be just stored on the physical layer - by the user. - - Thus invalidate_buffers in general usage is not allwowed to trash - dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to - be preserved. These buffers are simply skipped. - - We also skip buffers which are still in use. For example this can - happen if a userspace program is reading the block device. - - NOTE: In the case where the user removed a removable-media-disk even if - there's still dirty data not synced on disk (due a bug in the device driver - or due an error of the user), by not destroying the dirty buffers we could - generate corruption also on the next media inserted, thus a parameter is - necessary to handle this case in the most safe way possible (trying - to not corrupt also the new disk inserted with the data belonging to - the old now corrupted disk). Also for the ramdisk the natural thing - to do in order to release the ramdisk memory is to destroy dirty buffers. - - These are two special cases. Normal usage imply the device driver - to issue a sync on the device (without waiting I/O completion) and - then an invalidate_buffers call that doesn't trash dirty buffers. - - For handling cache coherency with the blkdev pagecache the 'update' case - is been introduced. It is needed to re-read from disk any pinned - buffer. NOTE: re-reading from disk is destructive so we can do it only - when we assume nobody is changing the buffercache under our I/O and when - we think the disk contains more recent information than the buffercache. - The update == 1 pass marks the buffers we need to update, the update == 2 - pass does the actual I/O. 
*/ -void invalidate_bdev(struct block_device *bdev) -{ - struct address_space *mapping = bdev->bd_inode->i_mapping; - - if (mapping->nrpages == 0) - return; - -#ifndef DDE_LINUX - invalidate_bh_lrus(); - invalidate_mapping_pages(mapping, 0, -1); -#endif -} - -/* - * Kick pdflush then try to free up some ZONE_NORMAL memory. - */ -static void free_more_memory(void) -{ - struct zone *zone; - int nid; - -#ifndef DDE_LINUX - wakeup_pdflush(1024); - yield(); - - for_each_online_node(nid) { - (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS), - gfp_zone(GFP_NOFS), NULL, - &zone); - if (zone) - try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0, - GFP_NOFS); - } -#else - WARN_UNIMPL; -#endif -} - -/* - * I/O completion handler for block_read_full_page() - pages - * which come unlocked at the end of I/O. - */ -static void end_buffer_async_read(struct buffer_head *bh, int uptodate) -{ - unsigned long flags; - struct buffer_head *first; - struct buffer_head *tmp; - struct page *page; - int page_uptodate = 1; - - BUG_ON(!buffer_async_read(bh)); - - page = bh->b_page; - if (uptodate) { - set_buffer_uptodate(bh); - } else { - clear_buffer_uptodate(bh); - if (!quiet_error(bh)) - buffer_io_error(bh); - SetPageError(page); - } - - /* - * Be _very_ careful from here on. Bad things can happen if - * two buffer heads end IO at almost the same time and both - * decide that the page is now completely done. - */ - first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); - clear_buffer_async_read(bh); - unlock_buffer(bh); - tmp = bh; - do { - if (!buffer_uptodate(tmp)) - page_uptodate = 0; - if (buffer_async_read(tmp)) { - BUG_ON(!buffer_locked(tmp)); - goto still_busy; - } - tmp = tmp->b_this_page; - } while (tmp != bh); - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); - - /* - * If none of the buffers had errors and they are all - * uptodate then we can set the page uptodate. - */ - if (page_uptodate && !PageError(page)) - SetPageUptodate(page); - unlock_page(page); - return; - -still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); - return; -} - -/* - * Completion handler for block_write_full_page() - pages which are unlocked - * during I/O, and which have PageWriteback cleared upon I/O completion. 
- */ -static void end_buffer_async_write(struct buffer_head *bh, int uptodate) -{ - char b[BDEVNAME_SIZE]; - unsigned long flags; - struct buffer_head *first; - struct buffer_head *tmp; - struct page *page; - - BUG_ON(!buffer_async_write(bh)); - - page = bh->b_page; - if (uptodate) { - set_buffer_uptodate(bh); - } else { - if (!quiet_error(bh)) { - buffer_io_error(bh); - printk(KERN_WARNING "lost page write due to " - "I/O error on %s\n", - bdevname(bh->b_bdev, b)); - } - set_bit(AS_EIO, &page->mapping->flags); - set_buffer_write_io_error(bh); - clear_buffer_uptodate(bh); - SetPageError(page); - } - - first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); - - clear_buffer_async_write(bh); - unlock_buffer(bh); - tmp = bh->b_this_page; - while (tmp != bh) { - if (buffer_async_write(tmp)) { - BUG_ON(!buffer_locked(tmp)); - goto still_busy; - } - tmp = tmp->b_this_page; - } - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); - end_page_writeback(page); - return; - -still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); - return; -} - -/* - * If a page's buffers are under async readin (end_buffer_async_read - * completion) then there is a possibility that another thread of - * control could lock one of the buffers after it has completed - * but while some of the other buffers have not completed. This - * locked buffer would confuse end_buffer_async_read() into not unlocking - * the page. So the absence of BH_Async_Read tells end_buffer_async_read() - * that this buffer is not under async I/O. - * - * The page comes unlocked when it has no locked buffer_async buffers - * left. - * - * PageLocked prevents anyone starting new async I/O reads any of - * the buffers. - * - * PageWriteback is used to prevent simultaneous writeout of the same - * page. - * - * PageLocked prevents anyone from starting writeback of a page which is - * under read I/O (PageWriteback is only ever set against a locked page). - */ -static void mark_buffer_async_read(struct buffer_head *bh) -{ - bh->b_end_io = end_buffer_async_read; - set_buffer_async_read(bh); -} - -void mark_buffer_async_write(struct buffer_head *bh) -{ - bh->b_end_io = end_buffer_async_write; - set_buffer_async_write(bh); -} -EXPORT_SYMBOL(mark_buffer_async_write); - - -/* - * fs/buffer.c contains helper functions for buffer-backed address space's - * fsync functions. A common requirement for buffer-based filesystems is - * that certain data from the backing blockdev needs to be written out for - * a successful fsync(). For example, ext2 indirect blocks need to be - * written back and waited upon before fsync() returns. - * - * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(), - * inode_has_buffers() and invalidate_inode_buffers() are provided for the - * management of a list of dependent buffers at ->i_mapping->private_list. - * - * Locking is a little subtle: try_to_free_buffers() will remove buffers - * from their controlling inode's queue when they are being freed. But - * try_to_free_buffers() will be operating against the *blockdev* mapping - * at the time, not against the S_ISREG file which depends on those buffers. - * So the locking for private_list is via the private_lock in the address_space - * which backs the buffers. Which is different from the address_space - * against which the buffers are listed. So for a particular address_space, - * mapping->private_lock does *not* protect mapping->private_list! 
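[Editorial note] Both async completion handlers above walk the page's circular b_this_page ring under BH_Uptodate_Lock to decide whether the whole page is done. The read-side scan, minus locking and IRQ handling, reduces to the following toy:

```c
/* Sketch of the completion scan in end_buffer_async_read() above:
 * the page is uptodate only when every buffer in the ring is
 * uptodate and none is still under async read. */
#include <stdio.h>

struct buf {
	int uptodate;
	int async_read;		/* still under I/O */
	struct buf *b_this_page;	/* circular list */
};

static int page_now_uptodate(struct buf *bh)
{
	struct buf *tmp = bh;
	int page_uptodate = 1;

	do {
		if (!tmp->uptodate)
			page_uptodate = 0;
		if (tmp->async_read)
			return 0;	/* another buffer still busy */
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	return page_uptodate;
}

int main(void)
{
	struct buf a, b;

	a = (struct buf){ 1, 0, &b };
	b = (struct buf){ 1, 0, &a };
	printf("all done: %d\n", page_now_uptodate(&a));	/* 1 */
	b.async_read = 1;
	printf("one busy: %d\n", page_now_uptodate(&a));	/* 0 */
	return 0;
}
```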
In fact, - * mapping->private_list will always be protected by the backing blockdev's - * ->private_lock. - * - * Which introduces a requirement: all buffers on an address_space's - * ->private_list must be from the same address_space: the blockdev's. - * - * address_spaces which do not place buffers at ->private_list via these - * utility functions are free to use private_lock and private_list for - * whatever they want. The only requirement is that list_empty(private_list) - * be true at clear_inode() time. - * - * FIXME: clear_inode should not call invalidate_inode_buffers(). The - * filesystems should do that. invalidate_inode_buffers() should just go - * BUG_ON(!list_empty). - * - * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should - * take an address_space, not an inode. And it should be called - * mark_buffer_dirty_fsync() to clearly define why those buffers are being - * queued up. - * - * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the - * list if it is already on a list. Because if the buffer is on a list, - * it *must* already be on the right one. If not, the filesystem is being - * silly. This will save a ton of locking. But first we have to ensure - * that buffers are taken *off* the old inode's list when they are freed - * (presumably in truncate). That requires careful auditing of all - * filesystems (do it inside bforget()). It could also be done by bringing - * b_inode back. - */ - -/* - * The buffer's backing address_space's private_lock must be held - */ -static void __remove_assoc_queue(struct buffer_head *bh) -{ - list_del_init(&bh->b_assoc_buffers); - WARN_ON(!bh->b_assoc_map); - if (buffer_write_io_error(bh)) - set_bit(AS_EIO, &bh->b_assoc_map->flags); - bh->b_assoc_map = NULL; -} - -int inode_has_buffers(struct inode *inode) -{ - return !list_empty(&inode->i_data.private_list); -} - -/* - * osync is designed to support O_SYNC io. It waits synchronously for - * all already-submitted IO to complete, but does not queue any new - * writes to the disk. - * - * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as - * you dirty the buffers, and then use osync_inode_buffers to wait for - * completion. Any other dirty buffers which are not yet queued for - * write will not be flushed to disk by the osync. - */ -static int osync_buffers_list(spinlock_t *lock, struct list_head *list) -{ - struct buffer_head *bh; - struct list_head *p; - int err = 0; - - spin_lock(lock); -repeat: - list_for_each_prev(p, list) { - bh = BH_ENTRY(p); - if (buffer_locked(bh)) { - get_bh(bh); - spin_unlock(lock); - wait_on_buffer(bh); - if (!buffer_uptodate(bh)) - err = -EIO; - brelse(bh); - spin_lock(lock); - goto repeat; - } - } - spin_unlock(lock); - return err; -} - -/** - * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers - * @mapping: the mapping which wants those buffers written - * - * Starts I/O against the buffers at mapping->private_list, and waits upon - * that I/O. - * - * Basically, this is a convenience function for fsync(). - * @mapping is a file or directory which needs those buffers to be written for - * a successful fsync(). 
- */ -int sync_mapping_buffers(struct address_space *mapping) -{ - struct address_space *buffer_mapping = mapping->assoc_mapping; - - if (buffer_mapping == NULL || list_empty(&mapping->private_list)) - return 0; - - return fsync_buffers_list(&buffer_mapping->private_lock, - &mapping->private_list); -} -EXPORT_SYMBOL(sync_mapping_buffers); - -/* - * Called when we've recently written block `bblock', and it is known that - * `bblock' was for a buffer_boundary() buffer. This means that the block at - * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's - * dirty, schedule it for IO. So that indirects merge nicely with their data. - */ -void write_boundary_block(struct block_device *bdev, - sector_t bblock, unsigned blocksize) -{ - struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); - if (bh) { - if (buffer_dirty(bh)) - ll_rw_block(WRITE, 1, &bh); - put_bh(bh); - } -} - -void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) -{ - struct address_space *mapping = inode->i_mapping; - struct address_space *buffer_mapping = bh->b_page->mapping; - - mark_buffer_dirty(bh); - if (!mapping->assoc_mapping) { - mapping->assoc_mapping = buffer_mapping; - } else { - BUG_ON(mapping->assoc_mapping != buffer_mapping); - } - if (!bh->b_assoc_map) { - spin_lock(&buffer_mapping->private_lock); - list_move_tail(&bh->b_assoc_buffers, - &mapping->private_list); - bh->b_assoc_map = mapping; - spin_unlock(&buffer_mapping->private_lock); - } -} -EXPORT_SYMBOL(mark_buffer_dirty_inode); - -/* - * Mark the page dirty, and set it dirty in the radix tree, and mark the inode - * dirty. - * - * If warn is true, then emit a warning if the page is not uptodate and has - * not been truncated. - */ -static void __set_page_dirty(struct page *page, - struct address_space *mapping, int warn) -{ - spin_lock_irq(&mapping->tree_lock); - if (page->mapping) { /* Race with truncate? */ - WARN_ON_ONCE(warn && !PageUptodate(page)); - - if (mapping_cap_account_dirty(mapping)) { - __inc_zone_page_state(page, NR_FILE_DIRTY); - __inc_bdi_stat(mapping->backing_dev_info, - BDI_RECLAIMABLE); - task_dirty_inc(current); - task_io_account_write(PAGE_CACHE_SIZE); - } - radix_tree_tag_set(&mapping->page_tree, - page_index(page), PAGECACHE_TAG_DIRTY); - } - spin_unlock_irq(&mapping->tree_lock); - __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); -} - -/* - * Add a page to the dirty page list. - * - * It is a sad fact of life that this function is called from several places - * deeply under spinlocking. It may not sleep. - * - * If the page has buffers, the uptodate buffers are set dirty, to preserve - * dirty-state coherency between the page and the buffers. It the page does - * not have buffers then when they are later attached they will all be set - * dirty. - * - * The buffers are dirtied before the page is dirtied. There's a small race - * window in which a writepage caller may see the page cleanness but not the - * buffer dirtiness. That's fine. If this code were to set the page dirty - * before the buffers, a concurrent writepage caller could clear the page dirty - * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean - * page on the dirty page list. - * - * We use private_lock to lock against try_to_free_buffers while using the - * page's buffer list. Also use this to protect against clean buffers being - * added to the page after it was set dirty. - * - * FIXME: may need to call ->reservepage here as well. That's rather up to the - * address_space though. 
- */ -int __set_page_dirty_buffers(struct page *page) -{ - int newly_dirty; - struct address_space *mapping = page_mapping(page); - - if (unlikely(!mapping)) - return !TestSetPageDirty(page); - - spin_lock(&mapping->private_lock); - if (page_has_buffers(page)) { - struct buffer_head *head = page_buffers(page); - struct buffer_head *bh = head; - - do { - set_buffer_dirty(bh); - bh = bh->b_this_page; - } while (bh != head); - } - newly_dirty = !TestSetPageDirty(page); - spin_unlock(&mapping->private_lock); - - if (newly_dirty) - __set_page_dirty(page, mapping, 1); - return newly_dirty; -} -EXPORT_SYMBOL(__set_page_dirty_buffers); - -/* - * Write out and wait upon a list of buffers. - * - * We have conflicting pressures: we want to make sure that all - * initially dirty buffers get waited on, but that any subsequently - * dirtied buffers don't. After all, we don't want fsync to last - * forever if somebody is actively writing to the file. - * - * Do this in two main stages: first we copy dirty buffers to a - * temporary inode list, queueing the writes as we go. Then we clean - * up, waiting for those writes to complete. - * - * During this second stage, any subsequent updates to the file may end - * up refiling the buffer on the original inode's dirty list again, so - * there is a chance we will end up with a buffer queued for write but - * not yet completed on that list. So, as a final cleanup we go through - * the osync code to catch these locked, dirty buffers without requeuing - * any newly dirty buffers for write. - */ -static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) -{ - struct buffer_head *bh; - struct list_head tmp; - struct address_space *mapping; - int err = 0, err2; - - INIT_LIST_HEAD(&tmp); - - spin_lock(lock); - while (!list_empty(list)) { - bh = BH_ENTRY(list->next); - mapping = bh->b_assoc_map; - __remove_assoc_queue(bh); - /* Avoid race with mark_buffer_dirty_inode() which does - * a lockless check and we rely on seeing the dirty bit */ - smp_mb(); - if (buffer_dirty(bh) || buffer_locked(bh)) { - list_add(&bh->b_assoc_buffers, &tmp); - bh->b_assoc_map = mapping; - if (buffer_dirty(bh)) { - get_bh(bh); - spin_unlock(lock); - /* - * Ensure any pending I/O completes so that - * ll_rw_block() actually writes the current - * contents - it is a noop if I/O is still in - * flight on potentially older contents. - */ - ll_rw_block(SWRITE_SYNC, 1, &bh); - brelse(bh); - spin_lock(lock); - } - } - } - - while (!list_empty(&tmp)) { - bh = BH_ENTRY(tmp.prev); - get_bh(bh); - mapping = bh->b_assoc_map; - __remove_assoc_queue(bh); - /* Avoid race with mark_buffer_dirty_inode() which does - * a lockless check and we rely on seeing the dirty bit */ - smp_mb(); - if (buffer_dirty(bh)) { - list_add(&bh->b_assoc_buffers, - &mapping->private_list); - bh->b_assoc_map = mapping; - } - spin_unlock(lock); - wait_on_buffer(bh); - if (!buffer_uptodate(bh)) - err = -EIO; - brelse(bh); - spin_lock(lock); - } - - spin_unlock(lock); - err2 = osync_buffers_list(lock, list); - if (err) - return err; - else - return err2; -} - -/* - * Invalidate any and all dirty buffers on a given inode. We are - * probably unmounting the fs, but that doesn't mean we have already - * done a sync(). Just drop the buffers from the inode list. - * - * NOTE: we take the inode's blockdev's mapping's private_lock. Which - * assumes that all the buffers are against the blockdev. Not true - * for reiserfs. 
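The two-stage structure of fsync_buffers_list() above can be modeled in isolation: stage one drains the main list onto a temporary list while issuing writes, stage two waits on the temporary list. A simplified userspace sketch; unlike the real code it does not preserve submission order when waiting, does not requeue re-dirtied items, and omits the final osync pass:

#include <stdio.h>

struct item { struct item *next; int id; int dirty; };

/* Userspace model of the two-stage pattern above. */
static void fsync_model(struct item **list)
{
    struct item *tmp = NULL;

    while (*list) {                    /* stage 1: drain and submit */
        struct item *it = *list;
        *list = it->next;
        if (it->dirty) {               /* keep it for stage 2 */
            it->next = tmp;
            tmp = it;
            printf("submit write for item %d\n", it->id);
        }
    }
    while (tmp) {                      /* stage 2: wait in turn */
        struct item *it = tmp;
        tmp = it->next;
        printf("wait on item %d\n", it->id);
    }
}

int main(void)
{
    struct item c = { NULL, 3, 1 }, b = { &c, 2, 0 }, a = { &b, 1, 1 };
    struct item *list = &a;

    fsync_model(&list);
    return 0;
}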
- */ -void invalidate_inode_buffers(struct inode *inode) -{ - if (inode_has_buffers(inode)) { - struct address_space *mapping = &inode->i_data; - struct list_head *list = &mapping->private_list; - struct address_space *buffer_mapping = mapping->assoc_mapping; - - spin_lock(&buffer_mapping->private_lock); - while (!list_empty(list)) - __remove_assoc_queue(BH_ENTRY(list->next)); - spin_unlock(&buffer_mapping->private_lock); - } -} -EXPORT_SYMBOL(invalidate_inode_buffers); - -/* - * Remove any clean buffers from the inode's buffer list. This is called - * when we're trying to free the inode itself. Those buffers can pin it. - * - * Returns true if all buffers were removed. - */ -int remove_inode_buffers(struct inode *inode) -{ - int ret = 1; - - if (inode_has_buffers(inode)) { - struct address_space *mapping = &inode->i_data; - struct list_head *list = &mapping->private_list; - struct address_space *buffer_mapping = mapping->assoc_mapping; - - spin_lock(&buffer_mapping->private_lock); - while (!list_empty(list)) { - struct buffer_head *bh = BH_ENTRY(list->next); - if (buffer_dirty(bh)) { - ret = 0; - break; - } - __remove_assoc_queue(bh); - } - spin_unlock(&buffer_mapping->private_lock); - } - return ret; -} - -/* - * Create the appropriate buffers when given a page for data area and - * the size of each buffer.. Use the bh->b_this_page linked list to - * follow the buffers created. Return NULL if unable to create more - * buffers. - * - * The retry flag is used to differentiate async IO (paging, swapping) - * which may not fail from ordinary buffer allocations. - */ -struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, - int retry) -{ - struct buffer_head *bh, *head; - long offset; - -try_again: - head = NULL; - offset = PAGE_SIZE; - while ((offset -= size) >= 0) { - bh = alloc_buffer_head(GFP_NOFS); - if (!bh) - goto no_grow; - - bh->b_bdev = NULL; - bh->b_this_page = head; - bh->b_blocknr = -1; - head = bh; - - bh->b_state = 0; - atomic_set(&bh->b_count, 0); - bh->b_private = NULL; - bh->b_size = size; - - /* Link the buffer to its page */ - set_bh_page(bh, page, offset); - - init_buffer(bh, NULL, NULL); - } - return head; -/* - * In case anything failed, we just free everything we got. - */ -no_grow: - if (head) { - do { - bh = head; - head = head->b_this_page; - free_buffer_head(bh); - } while (head); - } - - /* - * Return failure for non-async IO requests. Async IO requests - * are not allowed to fail, so we have to wait until buffer heads - * become available. But we don't want tasks sleeping with - * partially complete buffers, so all were released above. - */ - if (!retry) - return NULL; - - /* We're _really_ low on memory. Now we just - * wait for old buffer heads to become free due to - * finishing IO. Since this is an async request and - * the reserve list is empty, we're sure there are - * async buffer heads in use. - */ - free_more_memory(); - goto try_again; -} -EXPORT_SYMBOL_GPL(alloc_page_buffers); - -static inline void -link_dev_buffers(struct page *page, struct buffer_head *head) -{ - struct buffer_head *bh, *tail; - - bh = head; - do { - tail = bh; - bh = bh->b_this_page; - } while (bh); - tail->b_this_page = head; - attach_page_buffers(page, head); -} - -/* - * Initialise the state of a blockdev page's buffers. 
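alloc_page_buffers() above carves buffers from the end of the page toward offset zero, which is why the returned b_this_page chain starts with the lowest-offset buffer. A standalone demonstration of that offset walk:

#include <stdio.h>

#define PAGE_SIZE 4096L

/* Demo of the offset loop in alloc_page_buffers() above: buffers are
 * carved from the end of the page toward offset 0, so the chain is
 * built newest-first. */
int main(void)
{
    long size = 512;           /* buffer size */
    long offset = PAGE_SIZE;

    while ((offset -= size) >= 0)
        printf("buffer at page offset %ld\n", offset);
    return 0;                  /* prints 3584, 3072, ..., 0 */
}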
- */ -static void -init_page_buffers(struct page *page, struct block_device *bdev, - sector_t block, int size) -{ - struct buffer_head *head = page_buffers(page); - struct buffer_head *bh = head; - int uptodate = PageUptodate(page); - - do { - if (!buffer_mapped(bh)) { - init_buffer(bh, NULL, NULL); - bh->b_bdev = bdev; - bh->b_blocknr = block; - if (uptodate) - set_buffer_uptodate(bh); - set_buffer_mapped(bh); - } - block++; - bh = bh->b_this_page; - } while (bh != head); -} - -/* - * Create the page-cache page that contains the requested block. - * - * This is used purely for blockdev mappings. - */ -static struct page * -grow_dev_page(struct block_device *bdev, sector_t block, - pgoff_t index, int size) -{ - struct inode *inode = bdev->bd_inode; - struct page *page; - struct buffer_head *bh; - -#ifdef DDE_LINUX - WARN_UNIMPL; - return NULL; -#endif - - page = find_or_create_page(inode->i_mapping, index, - (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE); - if (!page) - return NULL; - - BUG_ON(!PageLocked(page)); - - if (page_has_buffers(page)) { - bh = page_buffers(page); - if (bh->b_size == size) { - init_page_buffers(page, bdev, block, size); - return page; - } - if (!try_to_free_buffers(page)) - goto failed; - } - - /* - * Allocate some buffers for this page - */ - bh = alloc_page_buffers(page, size, 0); - if (!bh) - goto failed; - - /* - * Link the page to the buffers and initialise them. Take the - * lock to be atomic wrt __find_get_block(), which does not - * run under the page lock. - */ - spin_lock(&inode->i_mapping->private_lock); - link_dev_buffers(page, bh); - init_page_buffers(page, bdev, block, size); - spin_unlock(&inode->i_mapping->private_lock); - return page; - -failed: - BUG(); - unlock_page(page); - page_cache_release(page); - return NULL; -} - -/* - * Create buffers for the specified block device block's page. If - * that page was dirty, the buffers are set dirty also. - */ -static int -grow_buffers(struct block_device *bdev, sector_t block, int size) -{ - struct page *page; - pgoff_t index; - int sizebits; - - sizebits = -1; - do { - sizebits++; - } while ((size << sizebits) < PAGE_SIZE); - - index = block >> sizebits; - - /* - * Check for a block which wants to lie outside our maximum possible - * pagecache index. (this comparison is done using sector_t types). - */ - if (unlikely(index != block >> sizebits)) { - char b[BDEVNAME_SIZE]; - - printk(KERN_ERR "%s: requested out-of-range block %llu for " - "device %s\n", - __func__, (unsigned long long)block, - bdevname(bdev, b)); - return -EIO; - } - block = index << sizebits; - /* Create a page with the proper size buffers.
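The arithmetic just above maps a device block to a pagecache index: sizebits is log2 of blocks-per-page, and redoing the shift detects a block number too large for pgoff_t. A standalone demonstration, using uint32_t to model a narrow pgoff_t:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/* Demo of grow_buffers()'s block -> page-index mapping above, plus
 * its overflow check. */
int main(void)
{
    uint64_t block = 0x123456789ULL;   /* device block number */
    unsigned size = 1024;              /* block size in bytes */
    int sizebits = -1;

    do {
        sizebits++;
    } while ((size << sizebits) < PAGE_SIZE);   /* 1<<sizebits blocks/page */

    uint32_t index = block >> sizebits;         /* may truncate */
    if ((uint64_t)index != block >> sizebits)
        printf("block %llu lies outside the pagecache range\n",
               (unsigned long long)block);
    else
        printf("block %llu -> page %u, first block in page %llu\n",
               (unsigned long long)block, index,
               (unsigned long long)((uint64_t)index << sizebits));
    return 0;
}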
*/ - page = grow_dev_page(bdev, block, index, size); - if (!page) - return 0; - unlock_page(page); - page_cache_release(page); - return 1; -} - -static struct buffer_head * -__getblk_slow(struct block_device *bdev, sector_t block, int size) -{ - /* Size must be multiple of hard sectorsize */ - if (unlikely(size & (bdev_hardsect_size(bdev)-1) || - (size < 512 || size > PAGE_SIZE))) { - printk(KERN_ERR "getblk(): invalid block size %d requested\n", - size); - printk(KERN_ERR "hardsect size: %d\n", - bdev_hardsect_size(bdev)); - - dump_stack(); - return NULL; - } - - for (;;) { - struct buffer_head * bh; - int ret; - - bh = __find_get_block(bdev, block, size); - if (bh) - return bh; - - ret = grow_buffers(bdev, block, size); - if (ret < 0) - return NULL; - if (ret == 0) - free_more_memory(); - } -} - -/* - * The relationship between dirty buffers and dirty pages: - * - * Whenever a page has any dirty buffers, the page's dirty bit is set, and - * the page is tagged dirty in its radix tree. - * - * At all times, the dirtiness of the buffers represents the dirtiness of - * subsections of the page. If the page has buffers, the page dirty bit is - * merely a hint about the true dirty state. - * - * When a page is set dirty in its entirety, all its buffers are marked dirty - * (if the page has buffers). - * - * When a buffer is marked dirty, its page is dirtied, but the page's other - * buffers are not. - * - * Also. When blockdev buffers are explicitly read with bread(), they - * individually become uptodate. But their backing page remains not - * uptodate - even if all of its buffers are uptodate. A subsequent - * block_read_full_page() against that page will discover all the uptodate - * buffers, will set the page uptodate and will perform no I/O. - */ - -/** - * mark_buffer_dirty - mark a buffer_head as needing writeout - * @bh: the buffer_head to mark dirty - * - * mark_buffer_dirty() will set the dirty bit against the buffer, then set its - * backing page dirty, then tag the page as dirty in its address_space's radix - * tree and then attach the address_space's inode to its superblock's dirty - * inode list. - * - * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock, - * mapping->tree_lock and the global inode_lock. - */ -void mark_buffer_dirty(struct buffer_head *bh) -{ -#ifndef DDE_LINUX - WARN_ON_ONCE(!buffer_uptodate(bh)); - - /* - * Very *carefully* optimize the it-is-already-dirty case. - * - * Don't let the final "is it dirty" escape to before we - * perhaps modified the buffer. - */ - if (buffer_dirty(bh)) { - smp_mb(); - if (buffer_dirty(bh)) - return; - } - - if (!test_set_buffer_dirty(bh)) { - struct page *page = bh->b_page; - if (!TestSetPageDirty(page)) - __set_page_dirty(page, page_mapping(page), 0); - } -#else - WARN_UNIMPL; -#endif -} - -/* - * Decrement a buffer_head's reference count. If all buffers against a page - * have zero reference count, are clean and unlocked, and if the page is clean - * and unlocked then try_to_free_buffers() may strip the buffers from the page - * in preparation for freeing it (sometimes, rarely, buffers are removed from - * a page but it ends up not being freed, and buffers may later be reattached). - */ -void __brelse(struct buffer_head * buf) -{ - if (atomic_read(&buf->b_count)) { - put_bh(buf); - return; - } - WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n"); -} - -/* - * bforget() is like brelse(), except it discards any - * potentially dirty data. 
- */ -void __bforget(struct buffer_head *bh) -{ - clear_buffer_dirty(bh); - if (bh->b_assoc_map) { - struct address_space *buffer_mapping = bh->b_page->mapping; - - spin_lock(&buffer_mapping->private_lock); - list_del_init(&bh->b_assoc_buffers); - bh->b_assoc_map = NULL; - spin_unlock(&buffer_mapping->private_lock); - } - __brelse(bh); -} - -static struct buffer_head *__bread_slow(struct buffer_head *bh) -{ - lock_buffer(bh); - if (buffer_uptodate(bh)) { - unlock_buffer(bh); - return bh; - } else { - get_bh(bh); - bh->b_end_io = end_buffer_read_sync; - submit_bh(READ, bh); - wait_on_buffer(bh); - if (buffer_uptodate(bh)) - return bh; - } - brelse(bh); - return NULL; -} - -/* - * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block(). - * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their - * refcount elevated by one when they're in an LRU. A buffer can only appear - * once in a particular CPU's LRU. A single buffer can be present in multiple - * CPU's LRUs at the same time. - * - * This is a transparent caching front-end to sb_bread(), sb_getblk() and - * sb_find_get_block(). - * - * The LRUs themselves only need locking against invalidate_bh_lrus. We use - * a local interrupt disable for that. - */ - -#define BH_LRU_SIZE 8 - -struct bh_lru { - struct buffer_head *bhs[BH_LRU_SIZE]; -}; - -static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }}; - -#ifdef CONFIG_SMP -#define bh_lru_lock() local_irq_disable() -#define bh_lru_unlock() local_irq_enable() -#else -#define bh_lru_lock() preempt_disable() -#define bh_lru_unlock() preempt_enable() -#endif - -static inline void check_irqs_on(void) -{ -#ifdef irqs_disabled - BUG_ON(irqs_disabled()); -#endif -} - -/* - * The LRU management algorithm is dopey-but-simple. Sorry. - */ -static void bh_lru_install(struct buffer_head *bh) -{ - struct buffer_head *evictee = NULL; - struct bh_lru *lru; - - check_irqs_on(); - bh_lru_lock(); - lru = &__get_cpu_var(bh_lrus); - if (lru->bhs[0] != bh) { - struct buffer_head *bhs[BH_LRU_SIZE]; - int in; - int out = 0; - - get_bh(bh); - bhs[out++] = bh; - for (in = 0; in < BH_LRU_SIZE; in++) { - struct buffer_head *bh2 = lru->bhs[in]; - - if (bh2 == bh) { - __brelse(bh2); - } else { - if (out >= BH_LRU_SIZE) { - BUG_ON(evictee != NULL); - evictee = bh2; - } else { - bhs[out++] = bh2; - } - } - } - while (out < BH_LRU_SIZE) - bhs[out++] = NULL; - memcpy(lru->bhs, bhs, sizeof(bhs)); - } - bh_lru_unlock(); - - if (evictee) - __brelse(evictee); -} - -/* - * Look up the bh in this cpu's LRU. If it's there, move it to the head. - */ -static struct buffer_head * -lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) -{ - struct buffer_head *ret = NULL; - struct bh_lru *lru; - unsigned int i; - - check_irqs_on(); - bh_lru_lock(); - lru = &__get_cpu_var(bh_lrus); - for (i = 0; i < BH_LRU_SIZE; i++) { - struct buffer_head *bh = lru->bhs[i]; - - if (bh && bh->b_bdev == bdev && - bh->b_blocknr == block && bh->b_size == size) { - if (i) { - while (i) { - lru->bhs[i] = lru->bhs[i - 1]; - i--; - } - lru->bhs[0] = bh; - } - get_bh(bh); - ret = bh; - break; - } - } - bh_lru_unlock(); - return ret; -} - -/* - * Perform a pagecache lookup for the matching buffer. If it's there, refresh - * it in the LRU and mark it as accessed. 
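The per-CPU scheme above is an 8-slot MRU array: a hit moves the entry to slot 0, an install shifts everything down and evicts the overflow entry. A userspace model of just the array shuffle (reference counting and locking omitted):

#include <stdio.h>
#include <string.h>

#define LRU_SIZE 8

/* Userspace model of the bh_lrus array above; 0 means an empty slot
 * and the int values stand in for buffer heads. */
static int lru[LRU_SIZE];

static int lookup(int key)
{
    for (int i = 0; i < LRU_SIZE; i++) {
        if (lru[i] == key) {
            memmove(&lru[1], &lru[0], i * sizeof(lru[0]));
            lru[0] = key;          /* promote hit to the MRU slot */
            return 1;
        }
    }
    return 0;
}

static int install(int key)
{
    int evictee = lru[LRU_SIZE - 1];
    memmove(&lru[1], &lru[0], (LRU_SIZE - 1) * sizeof(lru[0]));
    lru[0] = key;
    return evictee;                /* caller would drop its reference */
}

int main(void)
{
    for (int b = 1; b <= 10; b++)
        if (!lookup(b))
            install(b);
    lookup(5);                     /* hit: 5 moves to the front */
    for (int i = 0; i < LRU_SIZE; i++)
        printf("%d ", lru[i]);
    printf("\n");                  /* 5 10 9 8 7 6 4 3 */
    return 0;
}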
If it is not present then return - * NULL - */ -struct buffer_head * -__find_get_block(struct block_device *bdev, sector_t block, unsigned size) -{ - struct buffer_head *bh = lookup_bh_lru(bdev, block, size); - - if (bh == NULL) { - bh = __find_get_block_slow(bdev, block); - if (bh) - bh_lru_install(bh); - } - if (bh) - touch_buffer(bh); - return bh; -} -EXPORT_SYMBOL(__find_get_block); - -/* - * __getblk will locate (and, if necessary, create) the buffer_head - * which corresponds to the passed block_device, block and size. The - * returned buffer has its reference count incremented. - * - * __getblk() cannot fail - it just keeps trying. If you pass it an - * illegal block number, __getblk() will happily return a buffer_head - * which represents the non-existent block. Very weird. - * - * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers() - * attempt is failing. FIXME, perhaps? - */ -struct buffer_head * -__getblk(struct block_device *bdev, sector_t block, unsigned size) -{ - struct buffer_head *bh = __find_get_block(bdev, block, size); - - might_sleep(); - if (bh == NULL) - bh = __getblk_slow(bdev, block, size); - return bh; -} -EXPORT_SYMBOL(__getblk); - -/* - * Do async read-ahead on a buffer.. - */ -void __breadahead(struct block_device *bdev, sector_t block, unsigned size) -{ - struct buffer_head *bh = __getblk(bdev, block, size); - if (likely(bh)) { - ll_rw_block(READA, 1, &bh); - brelse(bh); - } -} -EXPORT_SYMBOL(__breadahead); - -/** - * __bread() - reads a specified block and returns the bh - * @bdev: the block_device to read from - * @block: number of block - * @size: size (in bytes) to read - * - * Reads a specified block, and returns buffer head that contains it. - * It returns NULL if the block was unreadable. - */ -struct buffer_head * -__bread(struct block_device *bdev, sector_t block, unsigned size) -{ - struct buffer_head *bh = __getblk(bdev, block, size); - - if (likely(bh) && !buffer_uptodate(bh)) - bh = __bread_slow(bh); - return bh; -} -EXPORT_SYMBOL(__bread); - -/* - * invalidate_bh_lrus() is called rarely - but not only at unmount. - * This doesn't race because it runs in each cpu either in irq - * or with preempt disabled. - */ -static void invalidate_bh_lru(void *arg) -{ - struct bh_lru *b = &get_cpu_var(bh_lrus); - int i; - - for (i = 0; i < BH_LRU_SIZE; i++) { - brelse(b->bhs[i]); - b->bhs[i] = NULL; - } - put_cpu_var(bh_lrus); -} - -void invalidate_bh_lrus(void) -{ -#ifndef DDE_LINUX - on_each_cpu(invalidate_bh_lru, NULL, 1); -#endif -} -EXPORT_SYMBOL_GPL(invalidate_bh_lrus); - -void set_bh_page(struct buffer_head *bh, - struct page *page, unsigned long offset) -{ - bh->b_page = page; - BUG_ON(offset >= PAGE_SIZE); - if (PageHighMem(page)) - /* - * This catches illegal uses and preserves the offset: - */ - bh->b_data = (char *)(0 + offset); - else - bh->b_data = page_address(page) + offset; -} -EXPORT_SYMBOL(set_bh_page); - -/* - * Called when truncating a buffer on a page completely. 
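__bread() above composes the whole stack: per-CPU LRU, then pagecache lookup, then a disk read. A sketch of typical filesystem usage (MYFS_SUPER_BLOCK and myfs_read_super_block are hypothetical):

#include <linux/buffer_head.h>

#define MYFS_SUPER_BLOCK 1	/* hypothetical block number */

/* Sketch only: read one metadata block, use it, drop the reference. */
static int myfs_read_super_block(struct block_device *bdev)
{
	struct buffer_head *bh;

	bh = __bread(bdev, MYFS_SUPER_BLOCK, 1024);
	if (!bh)
		return -EIO;	/* the block was unreadable */

	/* ... parse bh->b_data ... */

	brelse(bh);		/* balance the reference __bread took */
	return 0;
}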
- */ -static void discard_buffer(struct buffer_head * bh) -{ - lock_buffer(bh); - clear_buffer_dirty(bh); - bh->b_bdev = NULL; - clear_buffer_mapped(bh); - clear_buffer_req(bh); - clear_buffer_new(bh); - clear_buffer_delay(bh); - clear_buffer_unwritten(bh); - unlock_buffer(bh); -} - -/** - * block_invalidatepage - invalidate part or all of a buffer-backed page - * - * @page: the page which is affected - * @offset: the index of the truncation point - * - * block_invalidatepage() is called when all or part of the page has become - * invalidated by a truncate operation. - * - * block_invalidatepage() does not have to release all buffers, but it must - * ensure that no dirty buffer is left outside @offset and that no I/O - * is underway against any of the blocks which are outside the truncation - * point. Because the caller is about to free (and possibly reuse) those - * blocks on-disk. - */ -void block_invalidatepage(struct page *page, unsigned long offset) -{ - struct buffer_head *head, *bh, *next; - unsigned int curr_off = 0; - - BUG_ON(!PageLocked(page)); - if (!page_has_buffers(page)) - goto out; - - head = page_buffers(page); - bh = head; - do { - unsigned int next_off = curr_off + bh->b_size; - next = bh->b_this_page; - - /* - * is this block fully invalidated? - */ - if (offset <= curr_off) - discard_buffer(bh); - curr_off = next_off; - bh = next; - } while (bh != head); - - /* - * We release buffers only if the entire page is being invalidated. - * The get_block cached value has been unconditionally invalidated, - * so real IO is not possible anymore. - */ - if (offset == 0) - try_to_release_page(page, 0); -out: - return; -} -EXPORT_SYMBOL(block_invalidatepage); - -/* - * We attach and possibly dirty the buffers atomically wrt - * __set_page_dirty_buffers() via private_lock. try_to_free_buffers - * is already excluded via the page lock. - */ -void create_empty_buffers(struct page *page, - unsigned long blocksize, unsigned long b_state) -{ - struct buffer_head *bh, *head, *tail; - - head = alloc_page_buffers(page, blocksize, 1); - bh = head; - do { - bh->b_state |= b_state; - tail = bh; - bh = bh->b_this_page; - } while (bh); - tail->b_this_page = head; - - spin_lock(&page->mapping->private_lock); - if (PageUptodate(page) || PageDirty(page)) { - bh = head; - do { - if (PageDirty(page)) - set_buffer_dirty(bh); - if (PageUptodate(page)) - set_buffer_uptodate(bh); - bh = bh->b_this_page; - } while (bh != head); - } - attach_page_buffers(page, head); - spin_unlock(&page->mapping->private_lock); -} -EXPORT_SYMBOL(create_empty_buffers); - -/* - * We are taking a block for data and we don't want any output from any - * buffer-cache aliases starting from return from that function and - * until the moment when something will explicitly mark the buffer - * dirty (hopefully that will not happen until we will free that block ;-) - * We don't even need to mark it not-uptodate - nobody can expect - * anything from a newly allocated buffer anyway. We used to use - * unmap_buffer() for such invalidation, but that was wrong. We definitely - * don't want to mark the alias unmapped, for example - it would confuse - * anyone who might pick it with bread() afterwards... - * - * Also.. Note that bforget() doesn't lock the buffer. So there can - * be writeout I/O going on against recently-freed buffers. We don't - * wait on that I/O in bforget() - it's more efficient to wait on the I/O - * only if we really need to. That happens here.
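The per-buffer test in block_invalidatepage() above discards a buffer only when the truncation offset is at or before the buffer's start, so a buffer straddling the truncation point is kept. A standalone demonstration of that decision:

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Demo of block_invalidatepage()'s per-buffer test above: a buffer is
 * discarded only when it lies entirely past the truncation point. */
int main(void)
{
    unsigned bsize = 1024, offset = 1500;   /* truncation point in page */

    for (unsigned curr = 0; curr < PAGE_SIZE; curr += bsize)
        printf("buffer @%4u..%4u: %s\n", curr, curr + bsize - 1,
               offset <= curr ? "discard" : "keep");
    return 0;   /* the straddling buffer at 1024 is kept */
}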
- */ -void unmap_underlying_metadata(struct block_device *bdev, sector_t block) -{ - struct buffer_head *old_bh; - - might_sleep(); - - old_bh = __find_get_block_slow(bdev, block); - if (old_bh) { - clear_buffer_dirty(old_bh); - wait_on_buffer(old_bh); - clear_buffer_req(old_bh); - __brelse(old_bh); - } -} -EXPORT_SYMBOL(unmap_underlying_metadata); - -/* - * NOTE! All mapped/uptodate combinations are valid: - * - * Mapped Uptodate Meaning - * - * No No "unknown" - must do get_block() - * No Yes "hole" - zero-filled - * Yes No "allocated" - allocated on disk, not read in - * Yes Yes "valid" - allocated and up-to-date in memory. - * - * "Dirty" is valid only with the last case (mapped+uptodate). - */ - -/* - * While block_write_full_page is writing back the dirty buffers under - * the page lock, whoever dirtied the buffers may decide to clean them - * again at any time. We handle that by only looking at the buffer - * state inside lock_buffer(). - * - * If block_write_full_page() is called for regular writeback - * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a - * locked buffer. This only can happen if someone has written the buffer - * directly, with submit_bh(). At the address_space level PageWriteback - * prevents this contention from occurring. - */ -static int __block_write_full_page(struct inode *inode, struct page *page, - get_block_t *get_block, struct writeback_control *wbc) -{ - int err; - sector_t block; - sector_t last_block; - struct buffer_head *bh, *head; - const unsigned blocksize = 1 << inode->i_blkbits; - int nr_underway = 0; - - BUG_ON(!PageLocked(page)); - - last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; - - if (!page_has_buffers(page)) { - create_empty_buffers(page, blocksize, - (1 << BH_Dirty)|(1 << BH_Uptodate)); - } - - /* - * Be very careful. We have no exclusion from __set_page_dirty_buffers - * here, and the (potentially unmapped) buffers may become dirty at - * any time. If a buffer becomes dirty here after we've inspected it - * then we just miss that fact, and the page stays dirty. - * - * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; - * handle that here by just cleaning them. - */ - - block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); - head = page_buffers(page); - bh = head; - - /* - * Get all the dirty buffers mapped to disk addresses and - * handle any aliases from the underlying blockdev's mapping. - */ - do { - if (block > last_block) { - /* - * mapped buffers outside i_size will occur, because - * this page can be outside i_size when there is a - * truncate in progress. - */ - /* - * The buffer was zeroed by block_write_full_page() - */ - clear_buffer_dirty(bh); - set_buffer_uptodate(bh); - } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && - buffer_dirty(bh)) { - WARN_ON(bh->b_size != blocksize); - err = get_block(inode, block, bh, 1); - if (err) - goto recover; - clear_buffer_delay(bh); - if (buffer_new(bh)) { - /* blockdev mappings never come here */ - clear_buffer_new(bh); - unmap_underlying_metadata(bh->b_bdev, - bh->b_blocknr); - } - } - bh = bh->b_this_page; - block++; - } while (bh != head); - - do { - if (!buffer_mapped(bh)) - continue; - /* - * If it's a fully non-blocking write attempt and we cannot - * lock the buffer then redirty the page. Note that this can - * potentially cause a busy-wait loop from pdflush and kswapd - * activity, but those code paths have their own higher-level - * throttling. 
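The mapped/uptodate table above fits in a four-way classifier. A standalone model; the real code reads these states with buffer_mapped() and buffer_uptodate():

#include <stdio.h>
#include <stdbool.h>

/* The state table above, as code. */
static const char *bh_state_name(bool mapped, bool uptodate)
{
    if (!mapped && !uptodate) return "unknown: must do get_block()";
    if (!mapped &&  uptodate) return "hole: zero-filled";
    if ( mapped && !uptodate) return "allocated: on disk, not read in";
    return "valid: allocated and up-to-date in memory";
}

int main(void)
{
    for (int m = 0; m < 2; m++)
        for (int u = 0; u < 2; u++)
            printf("mapped=%d uptodate=%d: %s\n", m, u,
                   bh_state_name(m, u));
    return 0;
}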
- */ - if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { - lock_buffer(bh); - } else if (!trylock_buffer(bh)) { - redirty_page_for_writepage(wbc, page); - continue; - } - if (test_clear_buffer_dirty(bh)) { - mark_buffer_async_write(bh); - } else { - unlock_buffer(bh); - } - } while ((bh = bh->b_this_page) != head); - - /* - * The page and its buffers are protected by PageWriteback(), so we can - * drop the bh refcounts early. - */ - BUG_ON(PageWriteback(page)); - set_page_writeback(page); - - do { - struct buffer_head *next = bh->b_this_page; - if (buffer_async_write(bh)) { - submit_bh(WRITE, bh); - nr_underway++; - } - bh = next; - } while (bh != head); - unlock_page(page); - - err = 0; -done: - if (nr_underway == 0) { - /* - * The page was marked dirty, but the buffers were - * clean. Someone wrote them back by hand with - * ll_rw_block/submit_bh. A rare case. - */ - end_page_writeback(page); - - /* - * The page and buffer_heads can be released at any time from - * here on. - */ - } - return err; - -recover: - /* - * ENOSPC, or some other error. We may already have added some - * blocks to the file, so we need to write these out to avoid - * exposing stale data. - * The page is currently locked and not marked for writeback - */ - bh = head; - /* Recovery: lock and submit the mapped buffers */ - do { - if (buffer_mapped(bh) && buffer_dirty(bh) && - !buffer_delay(bh)) { - lock_buffer(bh); - mark_buffer_async_write(bh); - } else { - /* - * The buffer may have been set dirty during - * attachment to a dirty page. - */ - clear_buffer_dirty(bh); - } - } while ((bh = bh->b_this_page) != head); - SetPageError(page); - BUG_ON(PageWriteback(page)); - mapping_set_error(page->mapping, err); - set_page_writeback(page); - do { - struct buffer_head *next = bh->b_this_page; - if (buffer_async_write(bh)) { - clear_buffer_dirty(bh); - submit_bh(WRITE, bh); - nr_underway++; - } - bh = next; - } while (bh != head); - unlock_page(page); - goto done; -} - -/* - * If a page has any new buffers, zero them out here, and mark them uptodate - * and dirty so they'll be written out (in order to prevent uninitialised - * block data from leaking). And clear the new bit. 
- */ -void page_zero_new_buffers(struct page *page, unsigned from, unsigned to) -{ - unsigned int block_start, block_end; - struct buffer_head *head, *bh; - - BUG_ON(!PageLocked(page)); - if (!page_has_buffers(page)) - return; - - bh = head = page_buffers(page); - block_start = 0; - do { - block_end = block_start + bh->b_size; - - if (buffer_new(bh)) { - if (block_end > from && block_start < to) { - if (!PageUptodate(page)) { - unsigned start, size; - - start = max(from, block_start); - size = min(to, block_end) - start; - - zero_user(page, start, size); - set_buffer_uptodate(bh); - } - - clear_buffer_new(bh); - mark_buffer_dirty(bh); - } - } - - block_start = block_end; - bh = bh->b_this_page; - } while (bh != head); -} -EXPORT_SYMBOL(page_zero_new_buffers); - -static int __block_prepare_write(struct inode *inode, struct page *page, - unsigned from, unsigned to, get_block_t *get_block) -{ - unsigned block_start, block_end; - sector_t block; - int err = 0; - unsigned blocksize, bbits; - struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; - - BUG_ON(!PageLocked(page)); - BUG_ON(from > PAGE_CACHE_SIZE); - BUG_ON(to > PAGE_CACHE_SIZE); - BUG_ON(from > to); - - blocksize = 1 << inode->i_blkbits; - if (!page_has_buffers(page)) - create_empty_buffers(page, blocksize, 0); - head = page_buffers(page); - - bbits = inode->i_blkbits; - block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); - - for(bh = head, block_start = 0; bh != head || !block_start; - block++, block_start=block_end, bh = bh->b_this_page) { - block_end = block_start + blocksize; - if (block_end <= from || block_start >= to) { - if (PageUptodate(page)) { - if (!buffer_uptodate(bh)) - set_buffer_uptodate(bh); - } - continue; - } - if (buffer_new(bh)) - clear_buffer_new(bh); - if (!buffer_mapped(bh)) { - WARN_ON(bh->b_size != blocksize); - err = get_block(inode, block, bh, 1); - if (err) - break; - if (buffer_new(bh)) { - unmap_underlying_metadata(bh->b_bdev, - bh->b_blocknr); - if (PageUptodate(page)) { - clear_buffer_new(bh); - set_buffer_uptodate(bh); - mark_buffer_dirty(bh); - continue; - } - if (block_end > to || block_start < from) - zero_user_segments(page, - to, block_end, - block_start, from); - continue; - } - } - if (PageUptodate(page)) { - if (!buffer_uptodate(bh)) - set_buffer_uptodate(bh); - continue; - } - if (!buffer_uptodate(bh) && !buffer_delay(bh) && - !buffer_unwritten(bh) && - (block_start < from || block_end > to)) { - ll_rw_block(READ, 1, &bh); - *wait_bh++=bh; - } - } - /* - * If we issued read requests - let them complete. - */ - while(wait_bh > wait) { - wait_on_buffer(*--wait_bh); - if (!buffer_uptodate(*wait_bh)) - err = -EIO; - } - if (unlikely(err)) - page_zero_new_buffers(page, from, to); - return err; -} - -static int __block_commit_write(struct inode *inode, struct page *page, - unsigned from, unsigned to) -{ - unsigned block_start, block_end; - int partial = 0; - unsigned blocksize; - struct buffer_head *bh, *head; - - blocksize = 1 << inode->i_blkbits; - - for(bh = head = page_buffers(page), block_start = 0; - bh != head || !block_start; - block_start=block_end, bh = bh->b_this_page) { - block_end = block_start + blocksize; - if (block_end <= from || block_start >= to) { - if (!buffer_uptodate(bh)) - partial = 1; - } else { - set_buffer_uptodate(bh); - mark_buffer_dirty(bh); - } - clear_buffer_new(bh); - } - - /* - * If this is a partial write which happened to make all buffers - * uptodate then we can optimize away a bogus readpage() for - * the next read(). 
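page_zero_new_buffers() above clamps the zeroed range of each new buffer to its overlap with [from, to), since a failed or short copy left exactly that span uninitialised. A standalone demonstration of the clamp:

#include <stdio.h>

static unsigned max(unsigned a, unsigned b) { return a > b ? a : b; }
static unsigned min(unsigned a, unsigned b) { return a < b ? a : b; }

/* Demo of the range clamp in page_zero_new_buffers() above. */
int main(void)
{
    unsigned from = 700, to = 2500, bsize = 1024;

    for (unsigned bs = 0; bs < 4096; bs += bsize) {
        unsigned be = bs + bsize;
        if (be > from && bs < to) {          /* buffer overlaps the write */
            unsigned start = max(from, bs);
            unsigned size = min(to, be) - start;
            printf("buffer @%4u: zero [%u, %u)\n", bs, start,
                   start + size);
        }
    }
    return 0;
}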
Here we 'discover' whether the page went - * uptodate as a result of this (potentially partial) write. - */ - if (!partial) - SetPageUptodate(page); - return 0; -} - -/* - * block_write_begin takes care of the basic task of block allocation and - * bringing partial write blocks uptodate first. - * - * If *pagep is not NULL, then block_write_begin uses the locked page - * at *pagep rather than allocating its own. In this case, the page will - * not be unlocked or deallocated on failure. - */ -int block_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata, - get_block_t *get_block) -{ -#ifndef DDE_LINUX - struct inode *inode = mapping->host; - int status = 0; - struct page *page; - pgoff_t index; - unsigned start, end; - int ownpage = 0; - - index = pos >> PAGE_CACHE_SHIFT; - start = pos & (PAGE_CACHE_SIZE - 1); - end = start + len; - - page = *pagep; - if (page == NULL) { - ownpage = 1; - page = grab_cache_page_write_begin(mapping, index, flags); - if (!page) { - status = -ENOMEM; - goto out; - } - *pagep = page; - } else - BUG_ON(!PageLocked(page)); - - status = __block_prepare_write(inode, page, start, end, get_block); - if (unlikely(status)) { - ClearPageUptodate(page); - - if (ownpage) { - unlock_page(page); - page_cache_release(page); - *pagep = NULL; - -#ifndef DDE_LINUX - /* - * prepare_write() may have instantiated a few blocks - * outside i_size. Trim these off again. Don't need - * i_size_read because we hold i_mutex. - */ - if (pos + len > inode->i_size) - vmtruncate(inode, inode->i_size); -#endif - } - } - -out: - return status; -#else - WARN_UNIMPL; - return -1; -#endif -} -EXPORT_SYMBOL(block_write_begin); - -int block_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata) -{ - struct inode *inode = mapping->host; - unsigned start; - - start = pos & (PAGE_CACHE_SIZE - 1); - - if (unlikely(copied < len)) { - /* - * The buffers that were written will now be uptodate, so we - * don't have to worry about a readpage reading them and - * overwriting a partial write. However if we have encountered - * a short write and only partially written into a buffer, it - * will not be marked uptodate, so a readpage might come in and - * destroy our partial write. - * - * Do the simplest thing, and just treat any short write to a - * non uptodate page as a zero-length write, and force the - * caller to redo the whole thing. - */ - if (!PageUptodate(page)) - copied = 0; - - page_zero_new_buffers(page, start+copied, start+len); - } - flush_dcache_page(page); - - /* This could be a short (even 0-length) commit */ - __block_commit_write(inode, page, start, start+copied); - - return copied; -} -EXPORT_SYMBOL(block_write_end); - -int generic_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata) -{ - struct inode *inode = mapping->host; - int i_size_changed = 0; - - copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); - - /* - * No need to use i_size_read() here, the i_size - * cannot change under us because we hold i_mutex. - * - * But it's important to update i_size while still holding page lock: - * page writeout could otherwise come in and zero beyond i_size. 
- */ - if (pos+copied > inode->i_size) { - i_size_write(inode, pos+copied); - i_size_changed = 1; - } - - unlock_page(page); - page_cache_release(page); - - /* - * Don't mark the inode dirty under page lock. First, it unnecessarily - * makes the holding time of page lock longer. Second, it forces lock - * ordering of page lock and transaction start for journaling - * filesystems. - */ - if (i_size_changed) - mark_inode_dirty(inode); - - return copied; -} -EXPORT_SYMBOL(generic_write_end); - -/* - * block_is_partially_uptodate checks whether buffers within a page are - * uptodate or not. - * - * Returns true if all buffers which correspond to a file portion - * we want to read are uptodate. - */ -int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, - unsigned long from) -{ - struct inode *inode = page->mapping->host; - unsigned block_start, block_end, blocksize; - unsigned to; - struct buffer_head *bh, *head; - int ret = 1; - - if (!page_has_buffers(page)) - return 0; - - blocksize = 1 << inode->i_blkbits; - to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count); - to = from + to; - if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize) - return 0; - - head = page_buffers(page); - bh = head; - block_start = 0; - do { - block_end = block_start + blocksize; - if (block_end > from && block_start < to) { - if (!buffer_uptodate(bh)) { - ret = 0; - break; - } - if (block_end >= to) - break; - } - block_start = block_end; - bh = bh->b_this_page; - } while (bh != head); - - return ret; -} -EXPORT_SYMBOL(block_is_partially_uptodate); - -/* - * Generic "read page" function for block devices that have the normal - * get_block functionality. This is most of the block device filesystems. - * Reads the page asynchronously --- the unlock_buffer() and - * set/clear_buffer_uptodate() functions propagate buffer state into the - * page struct once IO has completed. - */ -int block_read_full_page(struct page *page, get_block_t *get_block) -{ - struct inode *inode = page->mapping->host; - sector_t iblock, lblock; - struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; - unsigned int blocksize; - int nr, i; - int fully_mapped = 1; - - BUG_ON(!PageLocked(page)); - blocksize = 1 << inode->i_blkbits; - if (!page_has_buffers(page)) - create_empty_buffers(page, blocksize, 0); - head = page_buffers(page); - - iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); - lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits; - bh = head; - nr = 0; - i = 0; - - do { - if (buffer_uptodate(bh)) - continue; - - if (!buffer_mapped(bh)) { - int err = 0; - - fully_mapped = 0; - if (iblock < lblock) { - WARN_ON(bh->b_size != blocksize); - err = get_block(inode, iblock, bh, 0); - if (err) - SetPageError(page); - } - if (!buffer_mapped(bh)) { - zero_user(page, i * blocksize, blocksize); - if (!err) - set_buffer_uptodate(bh); - continue; - } - /* - * get_block() might have updated the buffer - * synchronously - */ - if (buffer_uptodate(bh)) - continue; - } - arr[nr++] = bh; - } while (i++, iblock++, (bh = bh->b_this_page) != head); - - if (fully_mapped) - SetPageMappedToDisk(page); - - if (!nr) { - /* - * All buffers are uptodate - we can set the page uptodate - * as well. But not if get_block() returned an error. - */ - if (!PageError(page)) - SetPageUptodate(page); - unlock_page(page); - return 0; - } - - /* Stage two: lock the buffers */ - for (i = 0; i < nr; i++) { - bh = arr[i]; - lock_buffer(bh); - mark_buffer_async_read(bh); - } - - /* - * Stage 3: start the IO. 
Check for uptodateness - * inside the buffer lock in case another process reading - * the underlying blockdev brought it uptodate (the sct fix). - */ - for (i = 0; i < nr; i++) { - bh = arr[i]; - if (buffer_uptodate(bh)) - end_buffer_async_read(bh, 1); - else - submit_bh(READ, bh); - } - return 0; -} - -/* utility function for filesystems that need to do work on expanding - * truncates. Uses filesystem pagecache writes to allow the filesystem to - * deal with the hole. - */ -int generic_cont_expand_simple(struct inode *inode, loff_t size) -{ - struct address_space *mapping = inode->i_mapping; - struct page *page; - void *fsdata; - unsigned long limit; - int err; - - err = -EFBIG; - limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; - if (limit != RLIM_INFINITY && size > (loff_t)limit) { - send_sig(SIGXFSZ, current, 0); - goto out; - } - if (size > inode->i_sb->s_maxbytes) - goto out; - - err = pagecache_write_begin(NULL, mapping, size, 0, - AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND, - &page, &fsdata); - if (err) - goto out; - - err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata); - BUG_ON(err > 0); - -out: - return err; -} - -static int cont_expand_zero(struct file *file, struct address_space *mapping, - loff_t pos, loff_t *bytes) -{ - struct inode *inode = mapping->host; - unsigned blocksize = 1 << inode->i_blkbits; - struct page *page; - void *fsdata; - pgoff_t index, curidx; - loff_t curpos; - unsigned zerofrom, offset, len; - int err = 0; - - index = pos >> PAGE_CACHE_SHIFT; - offset = pos & ~PAGE_CACHE_MASK; - - while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) { - zerofrom = curpos & ~PAGE_CACHE_MASK; - if (zerofrom & (blocksize-1)) { - *bytes |= (blocksize-1); - (*bytes)++; - } - len = PAGE_CACHE_SIZE - zerofrom; - - err = pagecache_write_begin(file, mapping, curpos, len, - AOP_FLAG_UNINTERRUPTIBLE, - &page, &fsdata); - if (err) - goto out; - zero_user(page, zerofrom, len); - err = pagecache_write_end(file, mapping, curpos, len, len, - page, fsdata); - if (err < 0) - goto out; - BUG_ON(err != len); - err = 0; - - balance_dirty_pages_ratelimited(mapping); - } - - /* page covers the boundary, find the boundary offset */ - if (index == curidx) { - zerofrom = curpos & ~PAGE_CACHE_MASK; - /* if we will expand the thing last block will be filled */ - if (offset <= zerofrom) { - goto out; - } - if (zerofrom & (blocksize-1)) { - *bytes |= (blocksize-1); - (*bytes)++; - } - len = offset - zerofrom; - - err = pagecache_write_begin(file, mapping, curpos, len, - AOP_FLAG_UNINTERRUPTIBLE, - &page, &fsdata); - if (err) - goto out; - zero_user(page, zerofrom, len); - err = pagecache_write_end(file, mapping, curpos, len, len, - page, fsdata); - if (err < 0) - goto out; - BUG_ON(err != len); - err = 0; - } -out: - return err; -} - -/* - * For moronic filesystems that do not allow holes in file. - * We may have to extend the file. 
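cont_expand_zero() above rounds *bytes up to the next block boundary with the OR-then-increment idiom, which is valid because the block size is a power of two. A standalone demonstration:

#include <stdio.h>

/* Demo of the round-up idiom in cont_expand_zero() above: OR-ing in
 * (blocksize-1) sets every low bit up to the boundary, and the
 * increment carries into the next multiple of blocksize. */
int main(void)
{
    unsigned long bytes = 3000, blocksize = 1024;

    if (bytes & (blocksize - 1)) {   /* not already aligned */
        bytes |= (blocksize - 1);    /* -> 3071 */
        bytes++;                     /* -> 3072 */
    }
    printf("%lu\n", bytes);
    return 0;
}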
- */ -int cont_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata, - get_block_t *get_block, loff_t *bytes) -{ - struct inode *inode = mapping->host; - unsigned blocksize = 1 << inode->i_blkbits; - unsigned zerofrom; - int err; - - err = cont_expand_zero(file, mapping, pos, bytes); - if (err) - goto out; - - zerofrom = *bytes & ~PAGE_CACHE_MASK; - if (pos+len > *bytes && zerofrom & (blocksize-1)) { - *bytes |= (blocksize-1); - (*bytes)++; - } - - *pagep = NULL; - err = block_write_begin(file, mapping, pos, len, - flags, pagep, fsdata, get_block); -out: - return err; -} - -int block_prepare_write(struct page *page, unsigned from, unsigned to, - get_block_t *get_block) -{ - struct inode *inode = page->mapping->host; - int err = __block_prepare_write(inode, page, from, to, get_block); - if (err) - ClearPageUptodate(page); - return err; -} - -int block_commit_write(struct page *page, unsigned from, unsigned to) -{ - struct inode *inode = page->mapping->host; - __block_commit_write(inode,page,from,to); - return 0; -} - -/* - * block_page_mkwrite() is not allowed to change the file size as it gets - * called from a page fault handler when a page is first dirtied. Hence we must - * be careful to check for EOF conditions here. We set the page up correctly - * for a written page which means we get ENOSPC checking when writing into - * holes and correct delalloc and unwritten extent mapping on filesystems that - * support these features. - * - * We are not allowed to take the i_mutex here so we have to play games to - * protect against truncate races as the page could now be beyond EOF. Because - * vmtruncate() writes the inode size before removing pages, once we have the - * page lock we can determine safely if the page is beyond EOF. If it is not - * beyond EOF, then the page is guaranteed safe against truncation until we - * unlock the page. - */ -int -block_page_mkwrite(struct vm_area_struct *vma, struct page *page, - get_block_t get_block) -{ - struct inode *inode = vma->vm_file->f_path.dentry->d_inode; - unsigned long end; - loff_t size; - int ret = -EINVAL; - - lock_page(page); - size = i_size_read(inode); - if ((page->mapping != inode->i_mapping) || - (page_offset(page) > size)) { - /* page got truncated out from underneath us */ - goto out_unlock; - } - - /* page is wholly or partially inside EOF */ - if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) - end = size & ~PAGE_CACHE_MASK; - else - end = PAGE_CACHE_SIZE; - - ret = block_prepare_write(page, 0, end, get_block); - if (!ret) - ret = block_commit_write(page, 0, end); - -out_unlock: - unlock_page(page); - return ret; -} - -/* - * nobh_write_begin()'s prereads are special: the buffer_heads are freed - * immediately, while under the page lock. So it needs a special end_io - * handler which does not touch the bh after unlocking it. - */ -static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) -{ - __end_buffer_read_notouch(bh, uptodate); -} - -/* - * Attach the singly-linked list of buffers created by nobh_write_begin, to - * the page (converting it to circular linked list and taking care of page - * dirty races). 
- */ -static void attach_nobh_buffers(struct page *page, struct buffer_head *head) -{ - struct buffer_head *bh; - - BUG_ON(!PageLocked(page)); - - spin_lock(&page->mapping->private_lock); - bh = head; - do { - if (PageDirty(page)) - set_buffer_dirty(bh); - if (!bh->b_this_page) - bh->b_this_page = head; - bh = bh->b_this_page; - } while (bh != head); - attach_page_buffers(page, head); - spin_unlock(&page->mapping->private_lock); -} - -/* - * On entry, the page is fully not uptodate. - * On exit the page is fully uptodate in the areas outside (from,to) - */ -int nobh_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata, - get_block_t *get_block) -{ - struct inode *inode = mapping->host; - const unsigned blkbits = inode->i_blkbits; - const unsigned blocksize = 1 << blkbits; - struct buffer_head *head, *bh; - struct page *page; - pgoff_t index; - unsigned from, to; - unsigned block_in_page; - unsigned block_start, block_end; - sector_t block_in_file; - int nr_reads = 0; - int ret = 0; - int is_mapped_to_disk = 1; - - index = pos >> PAGE_CACHE_SHIFT; - from = pos & (PAGE_CACHE_SIZE - 1); - to = from + len; - - page = grab_cache_page_write_begin(mapping, index, flags); - if (!page) - return -ENOMEM; - *pagep = page; - *fsdata = NULL; - - if (page_has_buffers(page)) { - unlock_page(page); - page_cache_release(page); - *pagep = NULL; - return block_write_begin(file, mapping, pos, len, flags, pagep, - fsdata, get_block); - } - - if (PageMappedToDisk(page)) - return 0; - - /* - * Allocate buffers so that we can keep track of state, and potentially - * attach them to the page if an error occurs. In the common case of - * no error, they will just be freed again without ever being attached - * to the page (which is all OK, because we're under the page lock). - * - * Be careful: the buffer linked list is a NULL terminated one, rather - * than the circular one we're used to. - */ - head = alloc_page_buffers(page, blocksize, 0); - if (!head) { - ret = -ENOMEM; - goto out_release; - } - - block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); - - /* - * We loop across all blocks in the page, whether or not they are - * part of the affected region. This is so we can discover if the - * page is fully mapped-to-disk. - */ - for (block_start = 0, block_in_page = 0, bh = head; - block_start < PAGE_CACHE_SIZE; - block_in_page++, block_start += blocksize, bh = bh->b_this_page) { - int create; - - block_end = block_start + blocksize; - bh->b_state = 0; - create = 1; - if (block_start >= to) - create = 0; - ret = get_block(inode, block_in_file + block_in_page, - bh, create); - if (ret) - goto failed; - if (!buffer_mapped(bh)) - is_mapped_to_disk = 0; - if (buffer_new(bh)) - unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); - if (PageUptodate(page)) { - set_buffer_uptodate(bh); - continue; - } - if (buffer_new(bh) || !buffer_mapped(bh)) { - zero_user_segments(page, block_start, from, - to, block_end); - continue; - } - if (buffer_uptodate(bh)) - continue; /* reiserfs does this */ - if (block_start < from || block_end > to) { - lock_buffer(bh); - bh->b_end_io = end_buffer_read_nobh; - submit_bh(READ, bh); - nr_reads++; - } - } - - if (nr_reads) { - /* - * The page is locked, so these buffers are protected from - * any VM or truncate activity. Hence we don't need to care - * for the buffer_head refcounts. 
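attach_nobh_buffers() above converts the NULL-terminated chain built by nobh_write_begin() into the circular form the rest of this file expects. A minimal model of just that list fixup:

#include <stddef.h>

struct node { struct node *next; };

/* Model of the fixup in attach_nobh_buffers() above: walk the
 * NULL-terminated list and point the tail back at the head, yielding
 * the circular b_this_page form. */
static void make_circular(struct node *head)
{
    struct node *n = head;

    do {
        if (!n->next)
            n->next = head;   /* tail found: close the ring */
        n = n->next;
    } while (n != head);
}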
- */ - for (bh = head; bh; bh = bh->b_this_page) { - wait_on_buffer(bh); - if (!buffer_uptodate(bh)) - ret = -EIO; - } - if (ret) - goto failed; - } - - if (is_mapped_to_disk) - SetPageMappedToDisk(page); - - *fsdata = head; /* to be released by nobh_write_end */ - - return 0; - -failed: - BUG_ON(!ret); - /* - * Error recovery is a bit difficult. We need to zero out blocks that - * were newly allocated, and dirty them to ensure they get written out. - * Buffers need to be attached to the page at this point, otherwise - * the handling of potential IO errors during writeout would be hard - * (could try doing synchronous writeout, but what if that fails too?) - */ - attach_nobh_buffers(page, head); - page_zero_new_buffers(page, from, to); - -out_release: - unlock_page(page); - page_cache_release(page); - *pagep = NULL; - - if (pos + len > inode->i_size) - vmtruncate(inode, inode->i_size); - - return ret; -} -EXPORT_SYMBOL(nobh_write_begin); - -int nobh_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata) -{ - struct inode *inode = page->mapping->host; - struct buffer_head *head = fsdata; - struct buffer_head *bh; - BUG_ON(fsdata != NULL && page_has_buffers(page)); - - if (unlikely(copied < len) && head) - attach_nobh_buffers(page, head); - if (page_has_buffers(page)) - return generic_write_end(file, mapping, pos, len, - copied, page, fsdata); - - SetPageUptodate(page); - set_page_dirty(page); - if (pos+copied > inode->i_size) { - i_size_write(inode, pos+copied); - mark_inode_dirty(inode); - } - - unlock_page(page); - page_cache_release(page); - - while (head) { - bh = head; - head = head->b_this_page; - free_buffer_head(bh); - } - - return copied; -} -EXPORT_SYMBOL(nobh_write_end); - -/* - * nobh_writepage() - based on block_full_write_page() except - * that it tries to operate without attaching bufferheads to - * the page. - */ -int nobh_writepage(struct page *page, get_block_t *get_block, - struct writeback_control *wbc) -{ - struct inode * const inode = page->mapping->host; - loff_t i_size = i_size_read(inode); - const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; - unsigned offset; - int ret; - - /* Is the page fully inside i_size? */ - if (page->index < end_index) - goto out; - - /* Is the page fully outside i_size? (truncate in progress) */ - offset = i_size & (PAGE_CACHE_SIZE-1); - if (page->index >= end_index+1 || !offset) { - /* - * The page may have dirty, unmapped buffers. For example, - * they may have been added in ext3_writepage(). Make them - * freeable here, so the page does not leak. - */ -#if 0 - /* Not really sure about this - do we need this ? */ - if (page->mapping->a_ops->invalidatepage) - page->mapping->a_ops->invalidatepage(page, offset); -#endif - unlock_page(page); - return 0; /* don't care */ - } - - /* - * The page straddles i_size. It must be zeroed out on each and every - * writepage invocation because it may be mmapped. "A file is mapped - * in multiples of the page size. For a file that is not a multiple of - * the page size, the remaining memory is zeroed when mapped, and - * writes to that region are not written out to the file." 
- */ - zero_user_segment(page, offset, PAGE_CACHE_SIZE); -out: - ret = mpage_writepage(page, get_block, wbc); - if (ret == -EAGAIN) - ret = __block_write_full_page(inode, page, get_block, wbc); - return ret; -} -EXPORT_SYMBOL(nobh_writepage); - -int nobh_truncate_page(struct address_space *mapping, - loff_t from, get_block_t *get_block) -{ - pgoff_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); - unsigned blocksize; - sector_t iblock; - unsigned length, pos; - struct inode *inode = mapping->host; - struct page *page; - struct buffer_head map_bh; - int err; - - blocksize = 1 << inode->i_blkbits; - length = offset & (blocksize - 1); - - /* Block boundary? Nothing to do */ - if (!length) - return 0; - - length = blocksize - length; - iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); - - page = grab_cache_page(mapping, index); - err = -ENOMEM; - if (!page) - goto out; - - if (page_has_buffers(page)) { -has_buffers: - unlock_page(page); - page_cache_release(page); - return block_truncate_page(mapping, from, get_block); - } - - /* Find the buffer that contains "offset" */ - pos = blocksize; - while (offset >= pos) { - iblock++; - pos += blocksize; - } - - err = get_block(inode, iblock, &map_bh, 0); - if (err) - goto unlock; - /* unmapped? It's a hole - nothing to do */ - if (!buffer_mapped(&map_bh)) - goto unlock; - - /* Ok, it's mapped. Make sure it's up-to-date */ - if (!PageUptodate(page)) { - err = mapping->a_ops->readpage(NULL, page); - if (err) { - page_cache_release(page); - goto out; - } - lock_page(page); - if (!PageUptodate(page)) { - err = -EIO; - goto unlock; - } - if (page_has_buffers(page)) - goto has_buffers; - } - zero_user(page, offset, length); - set_page_dirty(page); - err = 0; - -unlock: - unlock_page(page); - page_cache_release(page); -out: - return err; -} -EXPORT_SYMBOL(nobh_truncate_page); - -int block_truncate_page(struct address_space *mapping, - loff_t from, get_block_t *get_block) -{ - pgoff_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); - unsigned blocksize; - sector_t iblock; - unsigned length, pos; - struct inode *inode = mapping->host; - struct page *page; - struct buffer_head *bh; - int err; - - blocksize = 1 << inode->i_blkbits; - length = offset & (blocksize - 1); - - /* Block boundary? Nothing to do */ - if (!length) - return 0; - - length = blocksize - length; - iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); - - page = grab_cache_page(mapping, index); - err = -ENOMEM; - if (!page) - goto out; - - if (!page_has_buffers(page)) - create_empty_buffers(page, blocksize, 0); - - /* Find the buffer that contains "offset" */ - bh = page_buffers(page); - pos = blocksize; - while (offset >= pos) { - bh = bh->b_this_page; - iblock++; - pos += blocksize; - } - - err = 0; - if (!buffer_mapped(bh)) { - WARN_ON(bh->b_size != blocksize); - err = get_block(inode, iblock, bh, 0); - if (err) - goto unlock; - /* unmapped? It's a hole - nothing to do */ - if (!buffer_mapped(bh)) - goto unlock; - } - - /* Ok, it's mapped. Make sure it's up-to-date */ - if (PageUptodate(page)) - set_buffer_uptodate(bh); - - if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { - err = -EIO; - ll_rw_block(READ, 1, &bh); - wait_on_buffer(bh); - /* Uhhuh. Read error. Complain and punt. 
*/ - if (!buffer_uptodate(bh)) - goto unlock; - } - - zero_user(page, offset, length); - mark_buffer_dirty(bh); - err = 0; - -unlock: - unlock_page(page); - page_cache_release(page); -out: - return err; -} - -/* - * The generic ->writepage function for buffer-backed address_spaces - */ -int block_write_full_page(struct page *page, get_block_t *get_block, - struct writeback_control *wbc) -{ - struct inode * const inode = page->mapping->host; - loff_t i_size = i_size_read(inode); - const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; - unsigned offset; - - /* Is the page fully inside i_size? */ - if (page->index < end_index) - return __block_write_full_page(inode, page, get_block, wbc); - - /* Is the page fully outside i_size? (truncate in progress) */ - offset = i_size & (PAGE_CACHE_SIZE-1); - if (page->index >= end_index+1 || !offset) { - /* - * The page may have dirty, unmapped buffers. For example, - * they may have been added in ext3_writepage(). Make them - * freeable here, so the page does not leak. - */ - do_invalidatepage(page, 0); - unlock_page(page); - return 0; /* don't care */ - } - - /* - * The page straddles i_size. It must be zeroed out on each and every - * writepage invokation because it may be mmapped. "A file is mapped - * in multiples of the page size. For a file that is not a multiple of - * the page size, the remaining memory is zeroed when mapped, and - * writes to that region are not written out to the file." - */ - zero_user_segment(page, offset, PAGE_CACHE_SIZE); - return __block_write_full_page(inode, page, get_block, wbc); -} - -sector_t generic_block_bmap(struct address_space *mapping, sector_t block, - get_block_t *get_block) -{ - struct buffer_head tmp; - struct inode *inode = mapping->host; - tmp.b_state = 0; - tmp.b_blocknr = 0; - tmp.b_size = 1 << inode->i_blkbits; - get_block(inode, block, &tmp, 0); - return tmp.b_blocknr; -} - -static void end_bio_bh_io_sync(struct bio *bio, int err) -{ - struct buffer_head *bh = bio->bi_private; - - if (err == -EOPNOTSUPP) { - set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); - set_bit(BH_Eopnotsupp, &bh->b_state); - } - - if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags))) - set_bit(BH_Quiet, &bh->b_state); - - bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); - bio_put(bio); -} - -int submit_bh(int rw, struct buffer_head * bh) -{ - struct bio *bio; - int ret = 0; - - BUG_ON(!buffer_locked(bh)); - BUG_ON(!buffer_mapped(bh)); - BUG_ON(!bh->b_end_io); - - /* - * Mask in barrier bit for a write (could be either a WRITE or a - * WRITE_SYNC - */ - if (buffer_ordered(bh) && (rw & WRITE)) - rw |= WRITE_BARRIER; - - /* - * Only clear out a write error when rewriting - */ - if (test_set_buffer_req(bh) && (rw & WRITE)) - clear_buffer_write_io_error(bh); - - /* - * from here on down, it's all bio -- do the initial mapping, - * submit_bio -> generic_make_request may further map this bio around - */ - bio = bio_alloc(GFP_NOIO, 1); - - bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); - bio->bi_bdev = bh->b_bdev; - bio->bi_io_vec[0].bv_page = bh->b_page; - bio->bi_io_vec[0].bv_len = bh->b_size; - bio->bi_io_vec[0].bv_offset = bh_offset(bh); - - bio->bi_vcnt = 1; - bio->bi_idx = 0; - bio->bi_size = bh->b_size; - - bio->bi_end_io = end_bio_bh_io_sync; - bio->bi_private = bh; - - bio_get(bio); - submit_bio(rw, bio); - - if (bio_flagged(bio, BIO_EOPNOTSUPP)) - ret = -EOPNOTSUPP; - - bio_put(bio); - return ret; -} - -/** - * ll_rw_block: low-level access to block devices (DEPRECATED) - * @rw: whether to %READ or %WRITE or %SWRITE or 
maybe %READA (readahead)
- * @nr: number of &struct buffer_heads in the array
- * @bhs: array of pointers to &struct buffer_head
- *
- * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
- * requests an I/O operation on them, either a %READ or a %WRITE. The third
- * option, %SWRITE, is like %WRITE except that we make sure the *current*
- * data in the buffers is sent to disk. The fourth %READA option is described
- * in the documentation for generic_make_request(), which ll_rw_block() calls.
- *
- * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
- * clean when doing a write request, and any buffer that appears to be
- * up-to-date when doing a read request. Further, it marks as clean the
- * buffers that are processed for writing (the buffer cache won't assume that
- * they are actually clean until the buffer gets unlocked).
- *
- * ll_rw_block sets b_end_io to a simple completion handler that marks
- * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
- * any waiters.
- *
- * All of the buffers must be for the same device, and must also be a
- * multiple of the current approved size for the device.
- */
-void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
-{
-	int i;
-
-	for (i = 0; i < nr; i++) {
-		struct buffer_head *bh = bhs[i];
-
-		if (rw == SWRITE || rw == SWRITE_SYNC)
-			lock_buffer(bh);
-		else if (!trylock_buffer(bh))
-			continue;
-
-		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
-			if (test_clear_buffer_dirty(bh)) {
-				bh->b_end_io = end_buffer_write_sync;
-				get_bh(bh);
-				if (rw == SWRITE_SYNC)
-					submit_bh(WRITE_SYNC, bh);
-				else
-					submit_bh(WRITE, bh);
-				continue;
-			}
-		} else {
-			if (!buffer_uptodate(bh)) {
-				bh->b_end_io = end_buffer_read_sync;
-				get_bh(bh);
-				submit_bh(rw, bh);
-				continue;
-			}
-		}
-		unlock_buffer(bh);
-	}
-}
-
-/*
- * For a data-integrity writeout, we need to wait upon any in-progress I/O,
- * then start new I/O and wait upon it.  The caller must have a ref on
- * the buffer_head.
- */
-int sync_dirty_buffer(struct buffer_head *bh)
-{
-	int ret = 0;
-
-	WARN_ON(atomic_read(&bh->b_count) < 1);
-	lock_buffer(bh);
-	if (test_clear_buffer_dirty(bh)) {
-		get_bh(bh);
-		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE, bh);
-		wait_on_buffer(bh);
-		if (buffer_eopnotsupp(bh)) {
-			clear_buffer_eopnotsupp(bh);
-			ret = -EOPNOTSUPP;
-		}
-		if (!ret && !buffer_uptodate(bh))
-			ret = -EIO;
-	} else {
-		unlock_buffer(bh);
-	}
-	return ret;
-}
-
-/*
- * try_to_free_buffers() checks if all the buffers on this particular page
- * are unused, and releases them if so.
- *
- * Exclusion against try_to_free_buffers may be obtained by either
- * locking the page or by holding its mapping's private_lock.
- *
- * If the page is dirty but all the buffers are clean then we need to
- * be sure to mark the page clean as well.  This is because the page
- * may be against a block device, and a later reattachment of buffers
- * to a dirty page will set *all* buffers dirty, which would corrupt
- * filesystem data on the same device.
- *
- * The same applies to regular filesystem pages: if all the buffers are
- * clean then we set the page clean and proceed.  To do that, we require
- * total exclusion from __set_page_dirty_buffers().  That is obtained with
- * private_lock.
- *
- * try_to_free_buffers() is non-blocking.
- */ -static inline int buffer_busy(struct buffer_head *bh) -{ - return atomic_read(&bh->b_count) | - (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); -} - -static int -drop_buffers(struct page *page, struct buffer_head **buffers_to_free) -{ - struct buffer_head *head = page_buffers(page); - struct buffer_head *bh; - - bh = head; - do { - if (buffer_write_io_error(bh) && page->mapping) - set_bit(AS_EIO, &page->mapping->flags); - if (buffer_busy(bh)) - goto failed; - bh = bh->b_this_page; - } while (bh != head); - - do { - struct buffer_head *next = bh->b_this_page; - - if (bh->b_assoc_map) - __remove_assoc_queue(bh); - bh = next; - } while (bh != head); - *buffers_to_free = head; - __clear_page_buffers(page); - return 1; -failed: - return 0; -} - -int try_to_free_buffers(struct page *page) -{ - struct address_space * const mapping = page->mapping; - struct buffer_head *buffers_to_free = NULL; - int ret = 0; - - BUG_ON(!PageLocked(page)); - if (PageWriteback(page)) - return 0; - - if (mapping == NULL) { /* can this still happen? */ - ret = drop_buffers(page, &buffers_to_free); - goto out; - } - - spin_lock(&mapping->private_lock); - ret = drop_buffers(page, &buffers_to_free); - - /* - * If the filesystem writes its buffers by hand (eg ext3) - * then we can have clean buffers against a dirty page. We - * clean the page here; otherwise the VM will never notice - * that the filesystem did any IO at all. - * - * Also, during truncate, discard_buffer will have marked all - * the page's buffers clean. We discover that here and clean - * the page also. - * - * private_lock must be held over this entire operation in order - * to synchronise against __set_page_dirty_buffers and prevent the - * dirty bit from being lost. - */ -#ifndef DDE_LINUX - if (ret) - cancel_dirty_page(page, PAGE_CACHE_SIZE); -#endif - spin_unlock(&mapping->private_lock); -out: - if (buffers_to_free) { - struct buffer_head *bh = buffers_to_free; - - do { - struct buffer_head *next = bh->b_this_page; - free_buffer_head(bh); - bh = next; - } while (bh != buffers_to_free); - } - return ret; -} -EXPORT_SYMBOL(try_to_free_buffers); - -void block_sync_page(struct page *page) -{ - struct address_space *mapping; - - smp_mb(); - mapping = page_mapping(page); - if (mapping) - blk_run_backing_dev(mapping->backing_dev_info, page); -} - -/* - * There are no bdflush tunables left. But distributions are - * still running obsolete flush daemons, so we terminate them here. - * - * Use of bdflush() is deprecated and will be removed in a future kernel. - * The `pdflush' kernel threads fully replace bdflush daemons and this call. - */ -SYSCALL_DEFINE2(bdflush, int, func, long, data) -{ - static int msg_count; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (msg_count < 5) { - msg_count++; - printk(KERN_INFO - "warning: process `%s' used the obsolete bdflush" - " system call\n", current->comm); - printk(KERN_INFO "Fix your initscripts?\n"); - } - - if (func == 1) - do_exit(0); - return 0; -} - -/* - * Buffer-head allocation - */ -static struct kmem_cache *bh_cachep; - -/* - * Once the number of bh's in the machine exceeds this level, we start - * stripping them in writeback. 
- */ -static int max_buffer_heads; - -int buffer_heads_over_limit; - -struct bh_accounting { - int nr; /* Number of live bh's */ - int ratelimit; /* Limit cacheline bouncing */ -}; - -static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; - -static void recalc_bh_state(void) -{ - int i; - int tot = 0; - - if (__get_cpu_var(bh_accounting).ratelimit++ < 4096) - return; - __get_cpu_var(bh_accounting).ratelimit = 0; - for_each_online_cpu(i) - tot += per_cpu(bh_accounting, i).nr; - buffer_heads_over_limit = (tot > max_buffer_heads); -} - -struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) -{ - struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); - if (ret) { - INIT_LIST_HEAD(&ret->b_assoc_buffers); - get_cpu_var(bh_accounting).nr++; - recalc_bh_state(); - put_cpu_var(bh_accounting); - } - return ret; -} -EXPORT_SYMBOL(alloc_buffer_head); - -void free_buffer_head(struct buffer_head *bh) -{ - BUG_ON(!list_empty(&bh->b_assoc_buffers)); - kmem_cache_free(bh_cachep, bh); - get_cpu_var(bh_accounting).nr--; - recalc_bh_state(); - put_cpu_var(bh_accounting); -} -EXPORT_SYMBOL(free_buffer_head); - -static void buffer_exit_cpu(int cpu) -{ - int i; - struct bh_lru *b = &per_cpu(bh_lrus, cpu); - - for (i = 0; i < BH_LRU_SIZE; i++) { - brelse(b->bhs[i]); - b->bhs[i] = NULL; - } - get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr; - per_cpu(bh_accounting, cpu).nr = 0; - put_cpu_var(bh_accounting); -} - -static int buffer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) - buffer_exit_cpu((unsigned long)hcpu); - return NOTIFY_OK; -} - -/** - * bh_uptodate_or_lock - Test whether the buffer is uptodate - * @bh: struct buffer_head - * - * Return true if the buffer is up-to-date and false, - * with the buffer locked, if not. - */ -int bh_uptodate_or_lock(struct buffer_head *bh) -{ - if (!buffer_uptodate(bh)) { - lock_buffer(bh); - if (!buffer_uptodate(bh)) - return 0; - unlock_buffer(bh); - } - return 1; -} -EXPORT_SYMBOL(bh_uptodate_or_lock); - -/** - * bh_submit_read - Submit a locked buffer for reading - * @bh: struct buffer_head - * - * Returns zero on success and -EIO on error. 
- */ -int bh_submit_read(struct buffer_head *bh) -{ - BUG_ON(!buffer_locked(bh)); - - if (buffer_uptodate(bh)) { - unlock_buffer(bh); - return 0; - } - - get_bh(bh); - bh->b_end_io = end_buffer_read_sync; - submit_bh(READ, bh); - wait_on_buffer(bh); - if (buffer_uptodate(bh)) - return 0; - return -EIO; -} -EXPORT_SYMBOL(bh_submit_read); - -static void -init_buffer_head(void *data) -{ - struct buffer_head *bh = data; - - memset(bh, 0, sizeof(*bh)); - INIT_LIST_HEAD(&bh->b_assoc_buffers); -} - -void __init buffer_init(void) -{ - int nrpages; - - bh_cachep = kmem_cache_create("buffer_head", - sizeof(struct buffer_head), 0, - (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| - SLAB_MEM_SPREAD), - init_buffer_head); - - /* - * Limit the bh occupancy to 10% of ZONE_NORMAL - */ - nrpages = (nr_free_buffer_pages() * 10) / 100; - max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); - hotcpu_notifier(buffer_cpu_notify, 0); -} - -EXPORT_SYMBOL(__bforget); -EXPORT_SYMBOL(__brelse); -EXPORT_SYMBOL(__wait_on_buffer); -EXPORT_SYMBOL(block_commit_write); -EXPORT_SYMBOL(block_prepare_write); -EXPORT_SYMBOL(block_page_mkwrite); -EXPORT_SYMBOL(block_read_full_page); -EXPORT_SYMBOL(block_sync_page); -EXPORT_SYMBOL(block_truncate_page); -EXPORT_SYMBOL(block_write_full_page); -EXPORT_SYMBOL(cont_write_begin); -EXPORT_SYMBOL(end_buffer_read_sync); -EXPORT_SYMBOL(end_buffer_write_sync); -EXPORT_SYMBOL(file_fsync); -EXPORT_SYMBOL(fsync_bdev); -EXPORT_SYMBOL(generic_block_bmap); -EXPORT_SYMBOL(generic_cont_expand_simple); -EXPORT_SYMBOL(init_buffer); -EXPORT_SYMBOL(invalidate_bdev); -EXPORT_SYMBOL(ll_rw_block); -EXPORT_SYMBOL(mark_buffer_dirty); -EXPORT_SYMBOL(submit_bh); -EXPORT_SYMBOL(sync_dirty_buffer); -EXPORT_SYMBOL(unlock_buffer); diff --git a/libdde_linux26/lib/src/fs/char_dev.c b/libdde_linux26/lib/src/fs/char_dev.c deleted file mode 100644 index 3b8e8b3d..00000000 --- a/libdde_linux26/lib/src/fs/char_dev.c +++ /dev/null @@ -1,572 +0,0 @@ -/* - * linux/fs/char_dev.c - * - * Copyright (C) 1991, 1992 Linus Torvalds - */ - -#include <linux/init.h> -#include <linux/fs.h> -#include <linux/kdev_t.h> -#include <linux/slab.h> -#include <linux/string.h> - -#include <linux/major.h> -#include <linux/errno.h> -#include <linux/module.h> -#include <linux/smp_lock.h> -#include <linux/seq_file.h> - -#include <linux/kobject.h> -#include <linux/kobj_map.h> -#include <linux/cdev.h> -#include <linux/mutex.h> -#include <linux/backing-dev.h> - -#ifdef CONFIG_KMOD -#include <linux/kmod.h> -#endif -#include "internal.h" - -#ifdef DDE_LINUX -#include "local.h" -#endif - -/* - * capabilities for /dev/mem, /dev/kmem and similar directly mappable character - * devices - * - permits shared-mmap for read, write and/or exec - * - does not permit private mmap in NOMMU mode (can't do COW) - * - no readahead or I/O queue unplugging required - */ -struct backing_dev_info directly_mappable_cdev_bdi = { - .capabilities = ( -#ifdef CONFIG_MMU - /* permit private copies of the data to be taken */ - BDI_CAP_MAP_COPY | -#endif - /* permit direct mmap, for read, write or exec */ - BDI_CAP_MAP_DIRECT | - BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP), -}; - -static struct kobj_map *cdev_map; - -static DEFINE_MUTEX(chrdevs_lock); - -static struct char_device_struct { - struct char_device_struct *next; - unsigned int major; - unsigned int baseminor; - int minorct; - char name[64]; - struct cdev *cdev; /* will die */ -} *chrdevs[CHRDEV_MAJOR_HASH_SIZE]; - -/* index in the above */ -static inline int major_to_index(int major) -{ - return 
major % CHRDEV_MAJOR_HASH_SIZE;
-}
-
-#ifdef CONFIG_PROC_FS
-
-void chrdev_show(struct seq_file *f, off_t offset)
-{
-	struct char_device_struct *cd;
-
-	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
-		mutex_lock(&chrdevs_lock);
-		for (cd = chrdevs[offset]; cd; cd = cd->next)
-			seq_printf(f, "%3d %s\n", cd->major, cd->name);
-		mutex_unlock(&chrdevs_lock);
-	}
-}
-
-#endif /* CONFIG_PROC_FS */
-
-/*
- * Register a single major with a specified minor range.
- *
- * If major == 0 this function will dynamically allocate a major and return
- * its number.
- *
- * If major > 0 this function will attempt to reserve the passed range of
- * minors and will return zero on success.
- *
- * Returns a -ve errno on failure.
- */
-static struct char_device_struct *
-__register_chrdev_region(unsigned int major, unsigned int baseminor,
-			   int minorct, const char *name)
-{
-	struct char_device_struct *cd, **cp;
-	int ret = 0;
-	int i;
-
-	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
-	if (cd == NULL)
-		return ERR_PTR(-ENOMEM);
-
-	mutex_lock(&chrdevs_lock);
-
-	/* temporary */
-	if (major == 0) {
-		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
-			if (chrdevs[i] == NULL)
-				break;
-		}
-
-		if (i == 0) {
-			ret = -EBUSY;
-			goto out;
-		}
-		major = i;
-		ret = major;
-	}
-
-	cd->major = major;
-	cd->baseminor = baseminor;
-	cd->minorct = minorct;
-	strlcpy(cd->name, name, sizeof(cd->name));
-
-	i = major_to_index(major);
-
-	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
-		if ((*cp)->major > major ||
-		    ((*cp)->major == major &&
-		     (((*cp)->baseminor >= baseminor) ||
-		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
-			break;
-
-	/* Check for overlapping minor ranges.  */
-	if (*cp && (*cp)->major == major) {
-		int old_min = (*cp)->baseminor;
-		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
-		int new_min = baseminor;
-		int new_max = baseminor + minorct - 1;
-
-		/* New driver overlaps from the left.  */
-		if (new_max >= old_min && new_max <= old_max) {
-			ret = -EBUSY;
-			goto out;
-		}
-
-		/* New driver overlaps from the right.  */
-		if (new_min <= old_max && new_min >= old_min) {
-			ret = -EBUSY;
-			goto out;
-		}
-	}
-
-	cd->next = *cp;
-	*cp = cd;
-	mutex_unlock(&chrdevs_lock);
-	return cd;
-out:
-	mutex_unlock(&chrdevs_lock);
-	kfree(cd);
-	return ERR_PTR(ret);
-}
-
-static struct char_device_struct *
-__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
-{
-	struct char_device_struct *cd = NULL, **cp;
-	int i = major_to_index(major);
-
-	mutex_lock(&chrdevs_lock);
-	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
-		if ((*cp)->major == major &&
-		    (*cp)->baseminor == baseminor &&
-		    (*cp)->minorct == minorct)
-			break;
-	if (*cp) {
-		cd = *cp;
-		*cp = cd->next;
-	}
-	mutex_unlock(&chrdevs_lock);
-	return cd;
-}
-
-/**
- * register_chrdev_region() - register a range of device numbers
- * @from: the first in the desired range of device numbers; must include
- *        the major number.
- * @count: the number of consecutive device numbers required
- * @name: the name of the device or driver.
- *
- * Return value is zero on success, a negative error code on failure.
- */ -int register_chrdev_region(dev_t from, unsigned count, const char *name) -{ - struct char_device_struct *cd; - dev_t to = from + count; - dev_t n, next; - - for (n = from; n < to; n = next) { - next = MKDEV(MAJOR(n)+1, 0); - if (next > to) - next = to; - cd = __register_chrdev_region(MAJOR(n), MINOR(n), - next - n, name); - if (IS_ERR(cd)) - goto fail; - } - return 0; -fail: - to = n; - for (n = from; n < to; n = next) { - next = MKDEV(MAJOR(n)+1, 0); - kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n)); - } - return PTR_ERR(cd); -} - -/** - * alloc_chrdev_region() - register a range of char device numbers - * @dev: output parameter for first assigned number - * @baseminor: first of the requested range of minor numbers - * @count: the number of minor numbers required - * @name: the name of the associated device or driver - * - * Allocates a range of char device numbers. The major number will be - * chosen dynamically, and returned (along with the first minor number) - * in @dev. Returns zero or a negative error code. - */ -int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count, - const char *name) -{ - struct char_device_struct *cd; - cd = __register_chrdev_region(0, baseminor, count, name); - if (IS_ERR(cd)) - return PTR_ERR(cd); - *dev = MKDEV(cd->major, cd->baseminor); - return 0; -} - -/** - * register_chrdev() - Register a major number for character devices. - * @major: major device number or 0 for dynamic allocation - * @name: name of this range of devices - * @fops: file operations associated with this devices - * - * If @major == 0 this functions will dynamically allocate a major and return - * its number. - * - * If @major > 0 this function will attempt to reserve a device with the given - * major number and will return zero on success. - * - * Returns a -ve errno on failure. - * - * The name of this device has nothing to do with the name of the device in - * /dev. It only helps to keep track of the different owners of devices. If - * your module name has only one type of devices it's ok to use e.g. the name - * of the module here. - * - * This function registers a range of 256 minor numbers. The first minor number - * is 0. - */ -int register_chrdev(unsigned int major, const char *name, - const struct file_operations *fops) -{ - struct char_device_struct *cd; - struct cdev *cdev; - char *s; - int err = -ENOMEM; - - cd = __register_chrdev_region(major, 0, 256, name); - if (IS_ERR(cd)) - return PTR_ERR(cd); - - cdev = cdev_alloc(); - if (!cdev) - goto out2; - - cdev->owner = fops->owner; - cdev->ops = fops; - kobject_set_name(&cdev->kobj, "%s", name); - for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/')) - *s = '!'; - - err = cdev_add(cdev, MKDEV(cd->major, 0), 256); - if (err) - goto out; - - cd->cdev = cdev; - - return major ? 0 : cd->major; -out: - kobject_put(&cdev->kobj); -out2: - kfree(__unregister_chrdev_region(cd->major, 0, 256)); - return err; -} - -/** - * unregister_chrdev_region() - return a range of device numbers - * @from: the first in the range of numbers to unregister - * @count: the number of device numbers to unregister - * - * This function will unregister a range of @count device numbers, - * starting with @from. The caller should normally be the one who - * allocated those numbers in the first place... 
- */ -void unregister_chrdev_region(dev_t from, unsigned count) -{ - dev_t to = from + count; - dev_t n, next; - - for (n = from; n < to; n = next) { - next = MKDEV(MAJOR(n)+1, 0); - if (next > to) - next = to; - kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n)); - } -} - -void unregister_chrdev(unsigned int major, const char *name) -{ - struct char_device_struct *cd; - cd = __unregister_chrdev_region(major, 0, 256); - if (cd && cd->cdev) - cdev_del(cd->cdev); - kfree(cd); -} - -static DEFINE_SPINLOCK(cdev_lock); - -static struct kobject *cdev_get(struct cdev *p) -{ - struct module *owner = p->owner; - struct kobject *kobj; - - if (owner && !try_module_get(owner)) - return NULL; - kobj = kobject_get(&p->kobj); - if (!kobj) - module_put(owner); - return kobj; -} - -void cdev_put(struct cdev *p) -{ - if (p) { - struct module *owner = p->owner; - kobject_put(&p->kobj); - module_put(owner); - } -} - -/* - * Called every time a character special file is opened - */ -static int chrdev_open(struct inode *inode, struct file *filp) -{ - struct cdev *p; - struct cdev *new = NULL; - int ret = 0; - - spin_lock(&cdev_lock); - p = inode->i_cdev; - if (!p) { - struct kobject *kobj; - int idx; - spin_unlock(&cdev_lock); - kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx); - if (!kobj) - return -ENXIO; - new = container_of(kobj, struct cdev, kobj); - spin_lock(&cdev_lock); - /* Check i_cdev again in case somebody beat us to it while - we dropped the lock. */ - p = inode->i_cdev; - if (!p) { - inode->i_cdev = p = new; - inode->i_cindex = idx; - list_add(&inode->i_devices, &p->list); - new = NULL; - } else if (!cdev_get(p)) - ret = -ENXIO; - } else if (!cdev_get(p)) - ret = -ENXIO; - spin_unlock(&cdev_lock); - cdev_put(new); - if (ret) - return ret; - - ret = -ENXIO; - filp->f_op = fops_get(p->ops); - if (!filp->f_op) - goto out_cdev_put; - - if (filp->f_op->open) { - ret = filp->f_op->open(inode,filp); - if (ret) - goto out_cdev_put; - } - - return 0; - - out_cdev_put: - cdev_put(p); - return ret; -} - -void cd_forget(struct inode *inode) -{ - spin_lock(&cdev_lock); - list_del_init(&inode->i_devices); - inode->i_cdev = NULL; - spin_unlock(&cdev_lock); -} - -static void cdev_purge(struct cdev *cdev) -{ - spin_lock(&cdev_lock); - while (!list_empty(&cdev->list)) { - struct inode *inode; - inode = container_of(cdev->list.next, struct inode, i_devices); - list_del_init(&inode->i_devices); - inode->i_cdev = NULL; - } - spin_unlock(&cdev_lock); -} - -/* - * Dummy default file-operations: the only thing this does - * is contain the open that then fills in the correct operations - * depending on the special file... - */ -const struct file_operations def_chr_fops = { - .open = chrdev_open, -}; - -static struct kobject *exact_match(dev_t dev, int *part, void *data) -{ - struct cdev *p = data; - return &p->kobj; -} - -static int exact_lock(dev_t dev, void *data) -{ - struct cdev *p = data; - return cdev_get(p) ? 0 : -1; -} - -/** - * cdev_add() - add a char device to the system - * @p: the cdev structure for the device - * @dev: the first device number for which this device is responsible - * @count: the number of consecutive minor numbers corresponding to this - * device - * - * cdev_add() adds the device represented by @p to the system, making it - * live immediately. A negative error code is returned on failure. 
- */ -int cdev_add(struct cdev *p, dev_t dev, unsigned count) -{ - p->dev = dev; - p->count = count; - return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p); -} - -static void cdev_unmap(dev_t dev, unsigned count) -{ - kobj_unmap(cdev_map, dev, count); -} - -/** - * cdev_del() - remove a cdev from the system - * @p: the cdev structure to be removed - * - * cdev_del() removes @p from the system, possibly freeing the structure - * itself. - */ -void cdev_del(struct cdev *p) -{ - cdev_unmap(p->dev, p->count); - kobject_put(&p->kobj); -} - - -static void cdev_default_release(struct kobject *kobj) -{ - struct cdev *p = container_of(kobj, struct cdev, kobj); - cdev_purge(p); -} - -static void cdev_dynamic_release(struct kobject *kobj) -{ - struct cdev *p = container_of(kobj, struct cdev, kobj); - cdev_purge(p); - kfree(p); -} - -static struct kobj_type ktype_cdev_default = { - .release = cdev_default_release, -}; - -static struct kobj_type ktype_cdev_dynamic = { - .release = cdev_dynamic_release, -}; - -/** - * cdev_alloc() - allocate a cdev structure - * - * Allocates and returns a cdev structure, or NULL on failure. - */ -struct cdev *cdev_alloc(void) -{ - struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL); - if (p) { - INIT_LIST_HEAD(&p->list); - kobject_init(&p->kobj, &ktype_cdev_dynamic); - } - return p; -} - -/** - * cdev_init() - initialize a cdev structure - * @cdev: the structure to initialize - * @fops: the file_operations for this device - * - * Initializes @cdev, remembering @fops, making it ready to add to the - * system with cdev_add(). - */ -void cdev_init(struct cdev *cdev, const struct file_operations *fops) -{ - memset(cdev, 0, sizeof *cdev); - INIT_LIST_HEAD(&cdev->list); - kobject_init(&cdev->kobj, &ktype_cdev_default); - cdev->ops = fops; -} - -static struct kobject *base_probe(dev_t dev, int *part, void *data) -{ - if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0) - /* Make old-style 2.4 aliases work */ - request_module("char-major-%d", MAJOR(dev)); - return NULL; -} - -void __init chrdev_init(void) -{ - cdev_map = kobj_map_init(base_probe, &chrdevs_lock); - bdi_init(&directly_mappable_cdev_bdi); -} - -#ifndef LIBINPUT -core_initcall(chrdev_init); -#endif - -/* Let modules do char dev stuff */ -EXPORT_SYMBOL(register_chrdev_region); -EXPORT_SYMBOL(unregister_chrdev_region); -EXPORT_SYMBOL(alloc_chrdev_region); -EXPORT_SYMBOL(cdev_init); -EXPORT_SYMBOL(cdev_alloc); -EXPORT_SYMBOL(cdev_del); -EXPORT_SYMBOL(cdev_add); -EXPORT_SYMBOL(register_chrdev); -EXPORT_SYMBOL(unregister_chrdev); -EXPORT_SYMBOL(directly_mappable_cdev_bdi); |

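Likewise for the char_dev.c half: a hedged sketch of the registration life cycle a driver of this vintage would follow. All demo_* identifiers are hypothetical; alloc_chrdev_region(), cdev_init(), cdev_add(), cdev_del() and unregister_chrdev_region() are the interfaces deleted above.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>

#define DEMO_MINORS 1

static dev_t demo_dev;
static struct cdev demo_cdev;

static const struct file_operations demo_fops = {
	.owner	= THIS_MODULE,
	/* .open, .read, .write, ... as needed */
};

static int __init demo_init(void)
{
	int err;

	/* Dynamic major: the allocated dev_t comes back in demo_dev. */
	err = alloc_chrdev_region(&demo_dev, 0, DEMO_MINORS, "demo");
	if (err)
		return err;

	cdev_init(&demo_cdev, &demo_fops);
	demo_cdev.owner = THIS_MODULE;

	/* The device is live as soon as cdev_add() returns zero. */
	err = cdev_add(&demo_cdev, demo_dev, DEMO_MINORS);
	if (err)
		unregister_chrdev_region(demo_dev, DEMO_MINORS);
	return err;
}

static void __exit demo_exit(void)
{
	cdev_del(&demo_cdev);
	unregister_chrdev_region(demo_dev, DEMO_MINORS);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The older register_chrdev()/unregister_chrdev() pair shown in the diff wraps exactly this sequence for a fixed 0..255 minor range.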