Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull third hunk of vfs changes from Al Viro:
 "This contains the ->direct_IO() changes from Omar + saner
  generic_write_checks() + dealing with fcntl()/{read,write}() races
  (mirroring O_APPEND/O_DIRECT into iocb->ki_flags and instead of
  repeatedly looking at ->f_flags, which can be changed by fcntl(2),
  check ->ki_flags - which cannot) + infrastructure bits for dhowells'
  d_inode annotations + Christoph's switch of /dev/loop to
  vfs_iter_write()"
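
The fcntl() race fix works by sampling the mutable ->f_flags once, at iocb setup
time, and checking the stable iocb->ki_flags copy thereafter. A minimal sketch of
that mapping, assuming the IOCB_APPEND/IOCB_DIRECT flag bits this series
introduces (the real fs.h helper also accounts for DAX-backed files):

    static inline int iocb_flags(struct file *file)
    {
            int res = 0;
            if (file->f_flags & O_APPEND)
                    res |= IOCB_APPEND;     /* sampled once; fcntl(2) can no longer race */
            if (file->f_flags & O_DIRECT)
                    res |= IOCB_DIRECT;
            return res;
    }

The aio hunk further down wires this in as
req->common.ki_flags = iocb_flags(req->common.ki_filp).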

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (30 commits)
  block: loop: switch to VFS ITER_BVEC
  configfs: Fix inconsistent use of file_inode() vs file->f_path.dentry->d_inode
  VFS: Make pathwalk use d_is_reg() rather than S_ISREG()
  VFS: Fix up debugfs to use d_is_dir() in place of S_ISDIR()
  VFS: Combine inode checks with d_is_negative() and d_is_positive() in pathwalk
  NFS: Don't use d_inode as a variable name
  VFS: Impose ordering on accesses of d_inode and d_flags
  VFS: Add owner-filesystem positive/negative dentry checks
  nfs: generic_write_checks() shouldn't be done on swapout...
  ocfs2: use __generic_file_write_iter()
  mirror O_APPEND and O_DIRECT into iocb->ki_flags
  switch generic_write_checks() to iocb and iter
  ocfs2: move generic_write_checks() before the alignment checks
  ocfs2_file_write_iter: stop messing with ppos
  udf_file_write_iter: reorder and simplify
  fuse: ->direct_IO() doesn't need generic_write_checks()
  ext4_file_write_iter: move generic_write_checks() up
  xfs_file_aio_write_checks: switch to iocb/iov_iter
  generic_write_checks(): drop isblk argument
  blkdev_write_iter: expand generic_file_checks() call in there
  ...
Author: Linus Torvalds
Date: 2015-04-16 23:27:56 -04:00
55 changed files with 700 additions and 870 deletions
+1 -1
@@ -196,7 +196,7 @@ prototypes:
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
int (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
int (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
int (*migratepage)(struct address_space *, struct page *, struct page *);
int (*launder_page)(struct page *);
int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
+1 -1
@@ -590,7 +590,7 @@ struct address_space_operations {
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
/* migrate the contents of a page to the specified target */
int (*migratepage) (struct page *, struct page *);
int (*launder_page) (struct page *);
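
Both documentation hunks record the same API change: ->direct_IO() loses its int
rw argument, and implementations derive the direction from the iterator via
iov_iter_rw(). An illustrative method under the new prototype (example_get_block
is a hypothetical stand-in for a filesystem's get_block callback):

    static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                                     loff_t offset)
    {
            struct inode *inode = iocb->ki_filp->f_mapping->host;

            /* The data direction now travels with the iov_iter itself. */
            if (iov_iter_rw(iter) == WRITE)
                    pr_debug("dio write at %lld\n", (long long)offset);

            return blockdev_direct_IO(iocb, inode, iter, offset,
                                      example_get_block);
    }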
+128 -182
@@ -88,28 +88,6 @@ static int part_shift;
static struct workqueue_struct *loop_wq;
/*
* Transfer functions
*/
static int transfer_none(struct loop_device *lo, int cmd,
struct page *raw_page, unsigned raw_off,
struct page *loop_page, unsigned loop_off,
int size, sector_t real_block)
{
char *raw_buf = kmap_atomic(raw_page) + raw_off;
char *loop_buf = kmap_atomic(loop_page) + loop_off;
if (cmd == READ)
memcpy(loop_buf, raw_buf, size);
else
memcpy(raw_buf, loop_buf, size);
kunmap_atomic(loop_buf);
kunmap_atomic(raw_buf);
cond_resched();
return 0;
}
static int transfer_xor(struct loop_device *lo, int cmd,
struct page *raw_page, unsigned raw_off,
struct page *loop_page, unsigned loop_off,
@@ -148,14 +126,13 @@ static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
static struct loop_func_table none_funcs = {
.number = LO_CRYPT_NONE,
.transfer = transfer_none,
};
};
static struct loop_func_table xor_funcs = {
.number = LO_CRYPT_XOR,
.transfer = transfer_xor,
.init = xor_init
};
};
/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
@@ -215,209 +192,171 @@ lo_do_transfer(struct loop_device *lo, int cmd,
struct page *lpage, unsigned loffs,
int size, sector_t rblock)
{
if (unlikely(!lo->transfer))
int ret;
ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
if (likely(!ret))
return 0;
return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
printk_ratelimited(KERN_ERR
"loop: Transfer error at byte offset %llu, length %i.\n",
(unsigned long long)rblock << 9, size);
return ret;
}
/**
* __do_lo_send_write - helper for writing data to a loop device
*
* This helper just factors out common code between do_lo_send_direct_write()
* and do_lo_send_write().
*/
static int __do_lo_send_write(struct file *file,
u8 *buf, const int len, loff_t pos)
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
struct kvec kvec = {.iov_base = buf, .iov_len = len};
struct iov_iter from;
struct iov_iter i;
ssize_t bw;
iov_iter_kvec(&from, ITER_KVEC | WRITE, &kvec, 1, len);
iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
file_start_write(file);
bw = vfs_iter_write(file, &from, &pos);
bw = vfs_iter_write(file, &i, ppos);
file_end_write(file);
if (likely(bw == len))
if (likely(bw == bvec->bv_len))
return 0;
printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
(unsigned long long)pos, len);
printk_ratelimited(KERN_ERR
"loop: Write error at byte offset %llu, length %i.\n",
(unsigned long long)*ppos, bvec->bv_len);
if (bw >= 0)
bw = -EIO;
return bw;
}
/**
* do_lo_send_direct_write - helper for writing data to a loop device
*
* This is the fast, non-transforming version that does not need double
* buffering.
*/
static int do_lo_send_direct_write(struct loop_device *lo,
struct bio_vec *bvec, loff_t pos, struct page *page)
static int lo_write_simple(struct loop_device *lo, struct request *rq,
loff_t pos)
{
ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
kmap(bvec->bv_page) + bvec->bv_offset,
bvec->bv_len, pos);
kunmap(bvec->bv_page);
cond_resched();
return bw;
struct bio_vec bvec;
struct req_iterator iter;
int ret = 0;
rq_for_each_segment(bvec, rq, iter) {
ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
if (ret < 0)
break;
cond_resched();
}
return ret;
}
/**
* do_lo_send_write - helper for writing data to a loop device
*
/*
* This is the slow, transforming version that needs to double buffer the
* data as it cannot do the transformations in place without having direct
* access to the destination pages of the backing file.
*/
static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
loff_t pos, struct page *page)
static int lo_write_transfer(struct loop_device *lo, struct request *rq,
loff_t pos)
{
int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
bvec->bv_offset, bvec->bv_len, pos >> 9);
if (likely(!ret))
return __do_lo_send_write(lo->lo_backing_file,
page_address(page), bvec->bv_len,
pos);
printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
"length %i.\n", (unsigned long long)pos, bvec->bv_len);
if (ret > 0)
ret = -EIO;
return ret;
}
static int lo_send(struct loop_device *lo, struct request *rq, loff_t pos)
{
int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
struct page *page);
struct bio_vec bvec;
struct bio_vec bvec, b;
struct req_iterator iter;
struct page *page = NULL;
struct page *page;
int ret = 0;
if (lo->transfer != transfer_none) {
page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
if (unlikely(!page))
goto fail;
kmap(page);
do_lo_send = do_lo_send_write;
} else {
do_lo_send = do_lo_send_direct_write;
}
page = alloc_page(GFP_NOIO);
if (unlikely(!page))
return -ENOMEM;
rq_for_each_segment(bvec, rq, iter) {
ret = do_lo_send(lo, &bvec, pos, page);
ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
bvec.bv_offset, bvec.bv_len, pos >> 9);
if (unlikely(ret))
break;
b.bv_page = page;
b.bv_offset = 0;
b.bv_len = bvec.bv_len;
ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
if (ret < 0)
break;
pos += bvec.bv_len;
}
if (page) {
kunmap(page);
__free_page(page);
}
out:
__free_page(page);
return ret;
fail:
printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
ret = -ENOMEM;
goto out;
}
struct lo_read_data {
struct loop_device *lo;
struct page *page;
unsigned offset;
int bsize;
};
static int
lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct splice_desc *sd)
{
struct lo_read_data *p = sd->u.data;
struct loop_device *lo = p->lo;
struct page *page = buf->page;
sector_t IV;
int size;
IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
(buf->offset >> 9);
size = sd->len;
if (size > p->bsize)
size = p->bsize;
if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n",
page->index);
size = -EINVAL;
}
flush_dcache_page(p->page);
if (size > 0)
p->offset += size;
return size;
}
static int
lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
return __splice_from_pipe(pipe, sd, lo_splice_actor);
}
static ssize_t
do_lo_receive(struct loop_device *lo,
struct bio_vec *bvec, int bsize, loff_t pos)
{
struct lo_read_data cookie;
struct splice_desc sd;
struct file *file;
ssize_t retval;
cookie.lo = lo;
cookie.page = bvec->bv_page;
cookie.offset = bvec->bv_offset;
cookie.bsize = bsize;
sd.len = 0;
sd.total_len = bvec->bv_len;
sd.flags = 0;
sd.pos = pos;
sd.u.data = &cookie;
file = lo->lo_backing_file;
retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
return retval;
}
static int
lo_receive(struct loop_device *lo, struct request *rq, int bsize, loff_t pos)
static int lo_read_simple(struct loop_device *lo, struct request *rq,
loff_t pos)
{
struct bio_vec bvec;
struct req_iterator iter;
ssize_t s;
struct iov_iter i;
ssize_t len;
rq_for_each_segment(bvec, rq, iter) {
s = do_lo_receive(lo, &bvec, bsize, pos);
if (s < 0)
return s;
iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
if (len < 0)
return len;
if (s != bvec.bv_len) {
flush_dcache_page(bvec.bv_page);
if (len != bvec.bv_len) {
struct bio *bio;
__rq_for_each_bio(bio, rq)
zero_fill_bio(bio);
break;
}
pos += bvec.bv_len;
cond_resched();
}
return 0;
}
static int lo_read_transfer(struct loop_device *lo, struct request *rq,
loff_t pos)
{
struct bio_vec bvec, b;
struct req_iterator iter;
struct iov_iter i;
struct page *page;
ssize_t len;
int ret = 0;
page = alloc_page(GFP_NOIO);
if (unlikely(!page))
return -ENOMEM;
rq_for_each_segment(bvec, rq, iter) {
loff_t offset = pos;
b.bv_page = page;
b.bv_offset = 0;
b.bv_len = bvec.bv_len;
iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
if (len < 0) {
ret = len;
goto out_free_page;
}
ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
bvec.bv_offset, len, offset >> 9);
if (ret)
goto out_free_page;
flush_dcache_page(bvec.bv_page);
if (len != bvec.bv_len) {
struct bio *bio;
__rq_for_each_bio(bio, rq)
zero_fill_bio(bio);
break;
}
}
ret = 0;
out_free_page:
__free_page(page);
return ret;
}
static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
{
/*
@@ -464,10 +403,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
ret = lo_req_flush(lo, rq);
else if (rq->cmd_flags & REQ_DISCARD)
ret = lo_discard(lo, rq, pos);
else if (lo->transfer)
ret = lo_write_transfer(lo, rq, pos);
else
ret = lo_send(lo, rq, pos);
} else
ret = lo_receive(lo, rq, lo->lo_blocksize, pos);
ret = lo_write_simple(lo, rq, pos);
} else {
if (lo->transfer)
ret = lo_read_transfer(lo, rq, pos);
else
ret = lo_read_simple(lo, rq, pos);
}
return ret;
}
@@ -788,7 +734,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
lo->transfer = transfer_none;
lo->transfer = NULL;
lo->ioctl = NULL;
lo->lo_sizelimit = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
@@ -1007,7 +953,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
info->lo_encrypt_key_size);
lo->lo_key_owner = uid;
}
}
return 0;
}
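
The heart of the loop conversion is lo_write_bvec() above: each request segment
is wrapped in a bvec-backed iov_iter and pushed through vfs_iter_write(),
replacing the old kmap()/splice paths. Reduced to a sketch (file, page and len
assumed to be in scope):

    struct bio_vec bvec = { .bv_page = page, .bv_offset = 0, .bv_len = len };
    struct iov_iter i;
    loff_t pos = 0;
    ssize_t bw;

    iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
    bw = vfs_iter_write(file, &i, &pos);    /* pos advances by the bytes written */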
+11 -11
@@ -359,8 +359,8 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
* up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
~(DT_MAX_BRW_SIZE - 1))
static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
struct iov_iter *iter, loff_t file_offset)
static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
loff_t file_offset)
{
struct lu_env *env;
struct cl_io *io;
@@ -399,7 +399,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
* size changing by concurrent truncates and writes.
* 1. Need inode mutex to operate transient pages.
*/
if (rw == READ)
if (iov_iter_rw(iter) == READ)
mutex_lock(&inode->i_mutex);
LASSERT(obj->cob_transient_pages == 0);
@@ -408,7 +408,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
size_t offs;
count = min_t(size_t, iov_iter_count(iter), size);
if (rw == READ) {
if (iov_iter_rw(iter) == READ) {
if (file_offset >= i_size_read(inode))
break;
if (file_offset + count > i_size_read(inode))
@@ -418,11 +418,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
if (likely(result > 0)) {
int n = DIV_ROUND_UP(result + offs, PAGE_SIZE);
result = ll_direct_IO_26_seg(env, io, rw, inode,
file->f_mapping,
result, file_offset,
pages, n);
ll_free_user_pages(pages, n, rw==READ);
result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter),
inode, file->f_mapping,
result, file_offset, pages,
n);
ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ);
}
if (unlikely(result <= 0)) {
/* If we can't allocate a large enough buffer
@@ -449,11 +449,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
}
out:
LASSERT(obj->cob_transient_pages == 0);
if (rw == READ)
if (iov_iter_rw(iter) == READ)
mutex_unlock(&inode->i_mutex);
if (tot_bytes > 0) {
if (rw == WRITE) {
if (iov_iter_rw(iter) == WRITE) {
struct lov_stripe_md *lsm;
lsm = ccc_inode_lsm_get(inode);
+2 -3
@@ -230,7 +230,6 @@ static int v9fs_launder_page(struct page *page)
/**
* v9fs_direct_IO - 9P address space operation for direct I/O
* @rw: direction (read or write)
* @iocb: target I/O control block
* @iov: array of vectors that define I/O buffer
* @pos: offset in file to begin the operation
@@ -248,12 +247,12 @@ static int v9fs_launder_page(struct page *page)
*
*/
static ssize_t
v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
struct file *file = iocb->ki_filp;
ssize_t n;
int err = 0;
if (rw & WRITE) {
if (iov_iter_rw(iter) == WRITE) {
n = p9_client_write(file->private_data, pos, iter, &err);
if (n) {
struct inode *inode = file_inode(file);
+10 -16
@@ -404,21 +404,16 @@ static ssize_t
v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
ssize_t retval = 0;
loff_t origin = iocb->ki_pos;
size_t count = iov_iter_count(from);
ssize_t retval;
loff_t origin;
int err = 0;
retval = generic_write_checks(file, &origin, &count, 0);
if (retval)
retval = generic_write_checks(iocb, from);
if (retval <= 0)
return retval;
iov_iter_truncate(from, count);
if (!count)
return 0;
retval = p9_client_write(file->private_data, origin, from, &err);
origin = iocb->ki_pos;
retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
if (retval > 0) {
struct inode *inode = file_inode(file);
loff_t i_size;
@@ -428,12 +423,11 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (inode->i_mapping && inode->i_mapping->nrpages)
invalidate_inode_pages2_range(inode->i_mapping,
pg_start, pg_end);
origin += retval;
iocb->ki_pos += retval;
i_size = i_size_read(inode);
iocb->ki_pos = origin;
if (origin > i_size) {
inode_add_bytes(inode, origin - i_size);
i_size_write(inode, origin);
if (iocb->ki_pos > i_size) {
inode_add_bytes(inode, iocb->ki_pos - i_size);
i_size_write(inode, iocb->ki_pos);
}
return retval;
}
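
The v9fs conversion above shows the calling convention that repeats in the
btrfs, ceph and cifs hunks below: generic_write_checks() now takes the iocb and
the iterator, truncates the iterator itself, and folds the old count == 0 case
into its return value. As a skeleton:

    ret = generic_write_checks(iocb, from); /* applies O_APPEND and limits, truncates *from */
    if (ret <= 0)                           /* error, or nothing left to write */
            return ret;
    /* iocb->ki_pos now holds the position to write at */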
+4 -5
@@ -389,8 +389,7 @@ static void affs_write_failed(struct address_space *mapping, loff_t to)
}
static ssize_t
affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
loff_t offset)
affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -398,15 +397,15 @@ affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
size_t count = iov_iter_count(iter);
ssize_t ret;
if (rw == WRITE) {
if (iov_iter_rw(iter) == WRITE) {
loff_t size = offset + count;
if (AFFS_I(inode)->mmu_private < size)
return 0;
}
ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, affs_get_block);
if (ret < 0 && (rw & WRITE))
ret = blockdev_direct_IO(iocb, inode, iter, offset, affs_get_block);
if (ret < 0 && iov_iter_rw(iter) == WRITE)
affs_write_failed(mapping, offset + count);
return ret;
}
+1 -1
@@ -1517,7 +1517,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
}
req->common.ki_pos = iocb->aio_offset;
req->common.ki_complete = aio_complete;
req->common.ki_flags = 0;
req->common.ki_flags = iocb_flags(req->common.ki_filp);
if (iocb->aio_flags & IOCB_FLAG_RESFD) {
/*
+16 -5
@@ -146,15 +146,13 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
}
static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
loff_t offset)
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter,
offset, blkdev_get_block,
NULL, NULL, 0);
return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset,
blkdev_get_block, NULL, NULL, 0);
}
int __sync_blockdev(struct block_device *bdev, int wait)
@@ -1597,9 +1595,22 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *bd_inode = file->f_mapping->host;
loff_t size = i_size_read(bd_inode);
struct blk_plug plug;
ssize_t ret;
if (bdev_read_only(I_BDEV(bd_inode)))
return -EPERM;
if (!iov_iter_count(from))
return 0;
if (iocb->ki_pos >= size)
return -ENOSPC;
iov_iter_truncate(from, size - iocb->ki_pos);
blk_start_plug(&plug);
ret = __generic_file_write_iter(iocb, from);
if (ret > 0) {
+11 -17
@@ -1739,27 +1739,19 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
u64 start_pos;
u64 end_pos;
ssize_t num_written = 0;
ssize_t err = 0;
size_t count = iov_iter_count(from);
bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
loff_t pos = iocb->ki_pos;
ssize_t err;
loff_t pos;
size_t count;
mutex_lock(&inode->i_mutex);
err = generic_write_checks(iocb, from);
if (err <= 0) {
mutex_unlock(&inode->i_mutex);
return err;
}
current->backing_dev_info = inode_to_bdi(inode);
err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
if (err) {
mutex_unlock(&inode->i_mutex);
goto out;
}
if (count == 0) {
mutex_unlock(&inode->i_mutex);
goto out;
}
iov_iter_truncate(from, count);
err = file_remove_suid(file);
if (err) {
mutex_unlock(&inode->i_mutex);
@@ -1786,6 +1778,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
*/
update_time_for_write(inode);
pos = iocb->ki_pos;
count = iov_iter_count(from);
start_pos = round_down(pos, root->sectorsize);
if (start_pos > i_size_read(inode)) {
/* Expand hole size to cover write data, preventing empty gap */
@@ -1800,7 +1794,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
if (sync)
atomic_inc(&BTRFS_I(inode)->sync_writers);
if (file->f_flags & O_DIRECT) {
if (iocb->ki_flags & IOCB_DIRECT) {
num_written = __btrfs_direct_write(iocb, from, pos);
} else {
num_written = __btrfs_buffered_write(file, from, pos);
+11 -11
@@ -8081,7 +8081,7 @@ free_ordered:
bio_endio(dio_bio, ret);
}
static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
const struct iov_iter *iter, loff_t offset)
{
int seg;
@@ -8096,7 +8096,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
goto out;
/* If this is a write we don't need to check anymore */
if (rw & WRITE)
if (iov_iter_rw(iter) == WRITE)
return 0;
/*
* Check to make sure we don't have duplicate iov_base's in this
@@ -8114,8 +8114,8 @@ out:
return retval;
}
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
struct iov_iter *iter, loff_t offset)
static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -8126,7 +8126,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
bool relock = false;
ssize_t ret;
if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iter, offset))
if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
return 0;
atomic_inc(&inode->i_dio_count);
@@ -8144,7 +8144,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
filemap_fdatawrite_range(inode->i_mapping, offset,
offset + count - 1);
if (rw & WRITE) {
if (iov_iter_rw(iter) == WRITE) {
/*
* If the write DIO is beyond the EOF, we need update
* the isize, but it is protected by i_mutex. So we can
@@ -8174,11 +8174,11 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
wakeup = false;
}
ret = __blockdev_direct_IO(rw, iocb, inode,
BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
iter, offset, btrfs_get_blocks_direct, NULL,
btrfs_submit_direct, flags);
if (rw & WRITE) {
ret = __blockdev_direct_IO(iocb, inode,
BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
iter, offset, btrfs_get_blocks_direct, NULL,
btrfs_submit_direct, flags);
if (iov_iter_rw(iter) == WRITE) {
current->journal_info = NULL;
if (ret < 0 && ret != -EIOCBQUEUED)
btrfs_delalloc_release_space(inode, count);
+1 -2
@@ -1198,8 +1198,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
* intercept O_DIRECT reads and writes early, this function should
* never get called.
*/
static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
struct iov_iter *iter,
static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter,
loff_t pos)
{
WARN_ON(1);
+10 -12
@@ -457,7 +457,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
if (ret < 0)
return ret;
if (file->f_flags & O_DIRECT) {
if (iocb->ki_flags & IOCB_DIRECT) {
while (iov_iter_count(i)) {
size_t start;
ssize_t n;
@@ -828,7 +828,7 @@ again:
return ret;
if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
(iocb->ki_filp->f_flags & O_DIRECT) ||
(iocb->ki_flags & IOCB_DIRECT) ||
(fi->flags & CEPH_F_SYNC)) {
dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
@@ -941,9 +941,9 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_osd_client *osdc =
&ceph_sb_to_client(inode->i_sb)->client->osdc;
ssize_t count = iov_iter_count(from), written = 0;
ssize_t count, written = 0;
int err, want, got;
loff_t pos = iocb->ki_pos;
loff_t pos;
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
@@ -953,14 +953,12 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(inode);
err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
if (err)
err = generic_write_checks(iocb, from);
if (err <= 0)
goto out;
if (count == 0)
goto out;
iov_iter_truncate(from, count);
pos = iocb->ki_pos;
count = iov_iter_count(from);
err = file_remove_suid(file);
if (err)
goto out;
@@ -997,12 +995,12 @@ retry_snap:
inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
(file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
(iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
struct iov_iter data;
mutex_unlock(&inode->i_mutex);
/* we might need to revert back to that point */
data = *from;
if (file->f_flags & O_DIRECT)
if (iocb->ki_flags & IOCB_DIRECT)
written = ceph_sync_direct_write(iocb, &data, pos);
else
written = ceph_sync_write(iocb, &data, pos);
+35 -58
@@ -2560,10 +2560,9 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
return rc;
}
static ssize_t
cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
size_t len;
struct file *file = iocb->ki_filp;
ssize_t total_written = 0;
struct cifsFileInfo *open_file;
struct cifs_tcon *tcon;
@@ -2573,16 +2572,16 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
struct iov_iter saved_from;
int rc;
len = iov_iter_count(from);
rc = generic_write_checks(file, poffset, &len, 0);
if (rc)
/*
* BB - optimize the way when signing is disabled. We can drop this
* extra memory-to-memory copying and use iovec buffers for constructing
* write request.
*/
rc = generic_write_checks(iocb, from);
if (rc <= 0)
return rc;
if (!len)
return 0;
iov_iter_truncate(from, len);
INIT_LIST_HEAD(&wdata_list);
cifs_sb = CIFS_FILE_SB(file);
open_file = file->private_data;
@@ -2593,8 +2592,8 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
memcpy(&saved_from, from, sizeof(struct iov_iter));
rc = cifs_write_from_iter(*poffset, len, from, open_file, cifs_sb,
&wdata_list);
rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
open_file, cifs_sb, &wdata_list);
/*
* If at least one write was successfully sent, then discard any rc
@@ -2633,7 +2632,7 @@ restart_loop:
memcpy(&tmp_from, &saved_from,
sizeof(struct iov_iter));
iov_iter_advance(&tmp_from,
wdata->offset - *poffset);
wdata->offset - iocb->ki_pos);
rc = cifs_write_from_iter(wdata->offset,
wdata->bytes, &tmp_from,
@@ -2650,34 +2649,13 @@ restart_loop:
kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
if (total_written > 0)
*poffset += total_written;
if (unlikely(!total_written))
return rc;
iocb->ki_pos += total_written;
set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
cifs_stats_bytes_written(tcon, total_written);
return total_written ? total_written : (ssize_t)rc;
}
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
ssize_t written;
struct inode *inode;
loff_t pos = iocb->ki_pos;
inode = file_inode(iocb->ki_filp);
/*
* BB - optimize the way when signing is disabled. We can drop this
* extra memory-to-memory copying and use iovec buffers for constructing
* write request.
*/
written = cifs_iovec_write(iocb->ki_filp, from, &pos);
if (written > 0) {
set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(inode)->flags);
iocb->ki_pos = pos;
}
return written;
return total_written;
}
static ssize_t
@@ -2688,8 +2666,7 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
struct inode *inode = file->f_mapping->host;
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
ssize_t rc = -EACCES;
loff_t lock_pos = iocb->ki_pos;
ssize_t rc;
/*
* We need to hold the sem to be sure nobody modifies lock list
@@ -2697,23 +2674,24 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
*/
down_read(&cinode->lock_sem);
mutex_lock(&inode->i_mutex);
if (file->f_flags & O_APPEND)
lock_pos = i_size_read(inode);
if (!cifs_find_lock_conflict(cfile, lock_pos, iov_iter_count(from),
rc = generic_write_checks(iocb, from);
if (rc <= 0)
goto out;
if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
server->vals->exclusive_lock_type, NULL,
CIFS_WRITE_OP)) {
CIFS_WRITE_OP))
rc = __generic_file_write_iter(iocb, from);
mutex_unlock(&inode->i_mutex);
else
rc = -EACCES;
out:
mutex_unlock(&inode->i_mutex);
if (rc > 0) {
ssize_t err;
err = generic_write_sync(file, iocb->ki_pos - rc, rc);
if (err < 0)
rc = err;
}
} else {
mutex_unlock(&inode->i_mutex);
if (rc > 0) {
ssize_t err = generic_write_sync(file, iocb->ki_pos - rc, rc);
if (err < 0)
rc = err;
}
up_read(&cinode->lock_sem);
return rc;
@@ -3877,8 +3855,7 @@ void cifs_oplock_break(struct work_struct *work)
* Direct IO is not yet supported in the cached mode.
*/
static ssize_t
cifs_direct_io(int rw, struct kiocb *iocb, struct iov_iter *iter,
loff_t pos)
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
/*
* FIXME
+1 -1
@@ -1598,7 +1598,7 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
if (offset >= 0)
break;
default:
mutex_unlock(&file_inode(file)->i_mutex);
mutex_unlock(&dentry->d_inode->i_mutex);
return -EINVAL;
}
if (offset != file->f_pos) {
+13 -14
@@ -98,9 +98,9 @@ static bool buffer_size_valid(struct buffer_head *bh)
return bh->b_state != 0;
}
static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
loff_t start, loff_t end, get_block_t get_block,
struct buffer_head *bh)
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
loff_t start, loff_t end, get_block_t get_block,
struct buffer_head *bh)
{
ssize_t retval = 0;
loff_t pos = start;
@@ -109,7 +109,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
void *addr;
bool hole = false;
if (rw != WRITE)
if (iov_iter_rw(iter) != WRITE)
end = min(end, i_size_read(inode));
while (pos < end) {
@@ -124,7 +124,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
bh->b_size = PAGE_ALIGN(end - pos);
bh->b_state = 0;
retval = get_block(inode, block, bh,
rw == WRITE);
iov_iter_rw(iter) == WRITE);
if (retval)
break;
if (!buffer_size_valid(bh))
@@ -137,7 +137,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
bh->b_size -= done;
}
hole = (rw != WRITE) && !buffer_written(bh);
hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
if (hole) {
addr = NULL;
size = bh->b_size - first;
@@ -154,7 +154,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
max = min(pos + size, end);
}
if (rw == WRITE)
if (iov_iter_rw(iter) == WRITE)
len = copy_from_iter(addr, max - pos, iter);
else if (!hole)
len = copy_to_iter(addr, max - pos, iter);
@@ -173,7 +173,6 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
/**
* dax_do_io - Perform I/O to a DAX file
* @rw: READ to read or WRITE to write
* @iocb: The control block for this I/O
* @inode: The file which the I/O is directed at
* @iter: The addresses to do I/O from or to
@@ -189,9 +188,9 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
* As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
* is in progress.
*/
ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
struct iov_iter *iter, loff_t pos,
get_block_t get_block, dio_iodone_t end_io, int flags)
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
struct iov_iter *iter, loff_t pos, get_block_t get_block,
dio_iodone_t end_io, int flags)
{
struct buffer_head bh;
ssize_t retval = -EINVAL;
@@ -199,7 +198,7 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
memset(&bh, 0, sizeof(bh));
if ((flags & DIO_LOCKING) && (rw == READ)) {
if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
struct address_space *mapping = inode->i_mapping;
mutex_lock(&inode->i_mutex);
retval = filemap_write_and_wait_range(mapping, pos, end - 1);
@@ -212,9 +211,9 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
/* Protects against truncate */
atomic_inc(&inode->i_dio_count);
retval = dax_io(rw, inode, iter, pos, end, get_block, &bh);
retval = dax_io(inode, iter, pos, end, get_block, &bh);
if ((flags & DIO_LOCKING) && (rw == READ))
if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
mutex_unlock(&inode->i_mutex);
if ((retval > 0) && end_io)
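
dax_io() above and the direct-io code below both key off iov_iter_rw() instead
of a passed-in rw. That helper masks the iterator's type down to the data
direction; roughly the following, though the real uio.h version is a macro with
extra type checking:

    static inline int iov_iter_rw(const struct iov_iter *i)
    {
            return i->type & (READ | WRITE);
    }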
+39 -8
@@ -269,6 +269,41 @@ static inline int dname_external(const struct dentry *dentry)
return dentry->d_name.name != dentry->d_iname;
}
/*
* Make sure other CPUs see the inode attached before the type is set.
*/
static inline void __d_set_inode_and_type(struct dentry *dentry,
struct inode *inode,
unsigned type_flags)
{
unsigned flags;
dentry->d_inode = inode;
smp_wmb();
flags = READ_ONCE(dentry->d_flags);
flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
flags |= type_flags;
WRITE_ONCE(dentry->d_flags, flags);
}
/*
* Ideally, we want to make sure that other CPUs see the flags cleared before
* the inode is detached, but this is really a violation of RCU principles
* since the ordering suggests we should always set inode before flags.
*
* We should instead replace or discard the entire dentry - but that sucks
* performancewise on mass deletion/rename.
*/
static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
unsigned flags = READ_ONCE(dentry->d_flags);
flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
WRITE_ONCE(dentry->d_flags, flags);
smp_wmb();
dentry->d_inode = NULL;
}
static void dentry_free(struct dentry *dentry)
{
WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
@@ -311,7 +346,7 @@ static void dentry_iput(struct dentry * dentry)
{
struct inode *inode = dentry->d_inode;
if (inode) {
dentry->d_inode = NULL;
__d_clear_type_and_inode(dentry);
hlist_del_init(&dentry->d_u.d_alias);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
@@ -335,8 +370,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
__releases(dentry->d_inode->i_lock)
{
struct inode *inode = dentry->d_inode;
__d_clear_type(dentry);
dentry->d_inode = NULL;
__d_clear_type_and_inode(dentry);
hlist_del_init(&dentry->d_u.d_alias);
dentry_rcuwalk_barrier(dentry);
spin_unlock(&dentry->d_lock);
@@ -1715,11 +1749,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
unsigned add_flags = d_flags_for_inode(inode);
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
dentry->d_flags |= add_flags;
if (inode)
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
dentry->d_inode = inode;
__d_set_inode_and_type(dentry, inode, add_flags);
dentry_rcuwalk_barrier(dentry);
spin_unlock(&dentry->d_lock);
fsnotify_d_instantiate(dentry, inode);
@@ -1937,8 +1969,7 @@ static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
add_flags |= DCACHE_DISCONNECTED;
spin_lock(&tmp->d_lock);
tmp->d_inode = inode;
tmp->d_flags |= add_flags;
__d_set_inode_and_type(tmp, inode, add_flags);
hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
hlist_bl_lock(&tmp->d_sb->s_anon);
hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
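
__d_set_inode_and_type() stores ->d_inode first and the type bits in ->d_flags
second, with an smp_wmb() in between, so a reader that observes a positive type
in d_flags may safely dereference d_inode. That ordering is what lets pathwalk
switch to flag-based checks; d_is_reg(), used by the pathwalk patches in this
series, is of roughly this shape:

    static inline bool d_is_reg(const struct dentry *dentry)
    {
            /* Type read from d_flags alone; a positive type implies the
             * earlier d_inode store is visible. */
            return (dentry->d_flags & DCACHE_ENTRY_TYPE) == DCACHE_REGULAR_TYPE;
    }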
+1 -1
@@ -524,7 +524,7 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
if (debugfs_positive(dentry)) {
dget(dentry);
if (S_ISDIR(dentry->d_inode->i_mode))
if (d_is_dir(dentry))
ret = simple_rmdir(parent->d_inode, dentry);
else
simple_unlink(parent->d_inode, dentry);
+18 -21
View File
@@ -1093,10 +1093,10 @@ static inline int drop_refcount(struct dio *dio)
* for the whole file.
*/
static inline ssize_t
do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter, loff_t offset,
get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int flags)
do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
loff_t offset, get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int flags)
{
unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
unsigned blkbits = i_blkbits;
@@ -1110,9 +1110,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct blk_plug plug;
unsigned long align = offset | iov_iter_alignment(iter);
if (rw & WRITE)
rw = WRITE_ODIRECT;
/*
* Avoid references to bdev if not absolutely needed to give
* the early prefetch in the caller enough time.
@@ -1127,7 +1124,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
}
/* watch out for a 0 len io from a tricksy fs */
if (rw == READ && !iov_iter_count(iter))
if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
return 0;
dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
@@ -1143,7 +1140,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
dio->flags = flags;
if (dio->flags & DIO_LOCKING) {
if (rw == READ) {
if (iov_iter_rw(iter) == READ) {
struct address_space *mapping =
iocb->ki_filp->f_mapping;
@@ -1169,19 +1166,19 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
if (is_sync_kiocb(iocb))
dio->is_async = false;
else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
(rw & WRITE) && end > i_size_read(inode))
iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
dio->is_async = false;
else
dio->is_async = true;
dio->inode = inode;
dio->rw = rw;
dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;
/*
* For AIO O_(D)SYNC writes we need to defer completions to a workqueue
* so that we can call ->fsync.
*/
if (dio->is_async && (rw & WRITE) &&
if (dio->is_async && iov_iter_rw(iter) == WRITE &&
((iocb->ki_filp->f_flags & O_DSYNC) ||
IS_SYNC(iocb->ki_filp->f_mapping->host))) {
retval = dio_set_defer_completion(dio);
@@ -1274,7 +1271,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
* we can let i_mutex go now that its achieved its purpose
* of protecting us from looking up uninitialized blocks.
*/
if (rw == READ && (dio->flags & DIO_LOCKING))
if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
mutex_unlock(&dio->inode->i_mutex);
/*
@@ -1286,7 +1283,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
*/
BUG_ON(retval == -EIOCBQUEUED);
if (dio->is_async && retval == 0 && dio->result &&
(rw == READ || dio->result == count))
(iov_iter_rw(iter) == READ || dio->result == count))
retval = -EIOCBQUEUED;
else
dio_await_completion(dio);
@@ -1300,11 +1297,11 @@ out:
return retval;
}
ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter, loff_t offset,
get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int flags)
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
loff_t offset, get_block_t get_block,
dio_iodone_t end_io, dio_submit_t submit_io,
int flags)
{
/*
* The block device state is needed in the end to finally
@@ -1318,8 +1315,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
prefetch(bdev->bd_queue);
prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
get_block, end_io, submit_io, flags);
return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block,
end_io, submit_io, flags);
}
EXPORT_SYMBOL(__blockdev_direct_IO);
+2 -2
@@ -963,8 +963,8 @@ static void exofs_invalidatepage(struct page *page, unsigned int offset,
/* TODO: Should be easy enough to do proprly */
static ssize_t exofs_direct_IO(int rw, struct kiocb *iocb,
struct iov_iter *iter, loff_t offset)
static ssize_t exofs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
loff_t offset)
{
return 0;
}

Some files were not shown because too many files have changed in this diff.