Merge tag 'pull-work.iov_iter-rebased' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull more iov_iter updates from Al Viro:
- more new_sync_{read,write}() speedups - ITER_UBUF introduction
- ITER_PIPE cleanups
- unification of iov_iter_get_pages/iov_iter_get_pages_alloc and
switching them to advancing semantics
- making ITER_PIPE take high-order pages without splitting them
- handling copy_page_from_iter() for high-order pages properly
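The advancing semantics are the change most callers below have to absorb: iov_iter_get_pages2() and iov_iter_get_pages_alloc2() leave the iterator already advanced past the bytes they returned, so the iov_iter_advance() that used to follow every successful call disappears, and a caller that consumes less than it was given hands the excess back with iov_iter_revert(). A minimal sketch of the two calling conventions (MAXPAGES and the 'used' count are placeholders, not names from this series):

	/* old, non-advancing: caller advances by hand */
	n = iov_iter_get_pages(iter, pages, LONG_MAX, MAXPAGES, &off);
	if (n > 0)
		iov_iter_advance(iter, n);	/* easy to misplace or forget */

	/* new, advancing: iterator already points past the data */
	n = iov_iter_get_pages2(iter, pages, LONG_MAX, MAXPAGES, &off);
	if (n > 0 && used < n)
		iov_iter_revert(iter, n - used);	/* give back the tail */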
* tag 'pull-work.iov_iter-rebased' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (32 commits)
fix copy_page_from_iter() for compound destinations
hugetlbfs: copy_page_to_iter() can deal with compound pages
copy_page_to_iter(): don't split high-order page in case of ITER_PIPE
expand those iov_iter_advance()...
pipe_get_pages(): switch to append_pipe()
get rid of non-advancing variants
ceph: switch the last caller of iov_iter_get_pages_alloc()
9p: convert to advancing variant of iov_iter_get_pages_alloc()
af_alg_make_sg(): switch to advancing variant of iov_iter_get_pages()
iter_to_pipe(): switch to advancing variant of iov_iter_get_pages()
block: convert to advancing variants of iov_iter_get_pages{,_alloc}()
iov_iter: advancing variants of iov_iter_get_pages{,_alloc}()
iov_iter: saner helper for page array allocation
fold __pipe_get_pages() into pipe_get_pages()
ITER_XARRAY: don't open-code DIV_ROUND_UP()
unify the rest of iov_iter_get_pages()/iov_iter_get_pages_alloc() guts
unify xarray_get_pages() and xarray_get_pages_alloc()
unify pipe_get_pages() and pipe_get_pages_alloc()
iov_iter_get_pages(): sanity-check arguments
iov_iter_get_pages_alloc(): lift freeing pages array on failure exits into wrapper
...
block/bio.c (23 lines changed)

@@ -1200,7 +1200,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	struct page **pages = (struct page **)bv;
 	ssize_t size, left;
 	unsigned len, i = 0;
-	size_t offset;
+	size_t offset, trim;
 	int ret = 0;

 	/*
@@ -1218,16 +1218,19 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	 * result to ensure the bio's total size is correct. The remainder of
 	 * the iov data will be picked up in the next bio iteration.
 	 */
-	size = iov_iter_get_pages(iter, pages, UINT_MAX - bio->bi_iter.bi_size,
+	size = iov_iter_get_pages2(iter, pages, UINT_MAX - bio->bi_iter.bi_size,
 				  nr_pages, &offset);
-	if (size > 0) {
-		nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
-		size = ALIGN_DOWN(size, bdev_logical_block_size(bio->bi_bdev));
-	} else
-		nr_pages = 0;
-	if (unlikely(size <= 0))
-		return size ? size : -EFAULT;
+	if (unlikely(size <= 0)) {
+		ret = size ? size : -EFAULT;
+		goto out;
+	}
+
+	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
+
+	trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
+	iov_iter_revert(iter, trim);
+
+	size -= trim;
+	if (unlikely(!size)) {
+		ret = -EFAULT;
+		goto out;
+	}

@@ -1246,7 +1249,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 		offset = 0;
 	}

-	iov_iter_advance(iter, size - left);
+	iov_iter_revert(iter, left);
 out:
 	while (i < nr_pages)
 		put_page(pages[i++]);
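The trim logic above is the block layer's version of the revert idiom: the iterator has already been advanced past everything iov_iter_get_pages2() pinned, so instead of the old ALIGN_DOWN() of the byte count, the tail that does not fill a whole logical block is explicitly un-consumed. Roughly (a sketch of the same arithmetic; lbs is just a local shorthand here):

	size_t lbs = bdev_logical_block_size(bio->bi_bdev);
	size_t trim = size & (lbs - 1);	/* bytes past the last full block */

	iov_iter_revert(iter, trim);	/* hand the unaligned tail back */
	size -= trim;			/* the bio covers whole blocks only */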
block/blk-map.c

@@ -254,7 +254,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 		size_t offs, added = 0;
 		int npages;

-		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
+		bytes = iov_iter_get_pages_alloc2(iter, &pages, LONG_MAX, &offs);
 		if (unlikely(bytes <= 0)) {
 			ret = bytes ? bytes : -EFAULT;
 			goto out_unmap;
@@ -284,7 +284,6 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 				bytes -= n;
 				offs = 0;
 			}
-			iov_iter_advance(iter, added);
 		}
 		/*
 		 * release the pages we didn't map into the bio, if any
@@ -293,8 +292,10 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 			put_page(pages[j++]);
 		kvfree(pages);
 		/* couldn't stuff something into bio? */
-		if (bytes)
+		if (bytes) {
+			iov_iter_revert(iter, bytes);
 			break;
+		}
 	}

 	ret = blk_rq_append_bio(rq, bio);
block/fops.c

@@ -75,7 +75,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,

 	if (iov_iter_rw(iter) == READ) {
 		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
-		if (iter_is_iovec(iter))
+		if (user_backed_iter(iter))
 			should_dirty = true;
 	} else {
 		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
@@ -204,7 +204,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 	}

 	dio->size = 0;
-	if (is_read && iter_is_iovec(iter))
+	if (is_read && user_backed_iter(iter))
 		dio->flags |= DIO_SHOULD_DIRTY;

 	blk_start_plug(&plug);
@@ -335,7 +335,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
 	dio->size = bio->bi_iter.bi_size;

 	if (is_read) {
-		if (iter_is_iovec(iter)) {
+		if (user_backed_iter(iter)) {
 			dio->flags |= DIO_SHOULD_DIRTY;
 			bio_set_pages_dirty(bio);
 		}
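Every iter_is_iovec() to user_backed_iter() substitution in this series has the same rationale: once ITER_UBUF exists, ITER_IOVEC is no longer the only iterator type backed by user memory, and these call sites are really asking "do the pages need to be dirtied after a direct-IO read?", not "is this specifically an iovec?". The v6.0 helper is essentially a flag test (paraphrased from include/linux/uio.h):

	/* true for ITER_UBUF and ITER_IOVEC, the user-memory-backed flavours */
	static inline bool user_backed_iter(const struct iov_iter *i)
	{
		return i->user_backed;
	}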
crypto/af_alg.c

@@ -404,7 +404,7 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
 	ssize_t n;
 	int npages, i;

-	n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
+	n = iov_iter_get_pages2(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
 	if (n < 0)
 		return n;

@@ -1191,7 +1191,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 		len += err;
 		atomic_add(err, &ctx->rcvused);
 		rsgl->sg_num_bytes = err;
-		iov_iter_advance(&msg->msg_iter, err);
 	}

 	*outlen = len;
crypto/algif_hash.c

@@ -102,11 +102,12 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 		err = crypto_wait_req(crypto_ahash_update(&ctx->req),
 				      &ctx->wait);
 		af_alg_free_sg(&ctx->sgl);
-		if (err)
+		if (err) {
+			iov_iter_revert(&msg->msg_iter, len);
 			goto unlock;
+		}

 		copied += len;
-		iov_iter_advance(&msg->msg_iter, len);
 	}

 	err = 0;
drivers/vhost/scsi.c

@@ -643,14 +643,12 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
 	size_t offset;
 	unsigned int npages = 0;

-	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
+	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
 				VHOST_SCSI_PREALLOC_UPAGES, &offset);
 	/* No pages were pinned */
 	if (bytes <= 0)
 		return bytes < 0 ? bytes : -EFAULT;

-	iov_iter_advance(iter, bytes);
-
 	while (bytes) {
 		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
 		sg_set_page(sg++, pages[npages++], n, offset);
fs/ceph/addr.c

@@ -329,7 +329,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)

 	dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
 	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
-	err = iov_iter_get_pages_alloc(&iter, &pages, len, &page_off);
+	err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
 	if (err < 0) {
 		dout("%s: iov_ter_get_pages_alloc returned %d\n", __func__, err);
 		goto out;
fs/ceph/file.c

@@ -95,12 +95,11 @@ static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
 		size_t start;
 		int idx = 0;

-		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
+		bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
 					   ITER_GET_BVECS_PAGES, &start);
 		if (bytes < 0)
 			return size ?: bytes;

-		iov_iter_advance(iter, bytes);
 		size += bytes;

 		for ( ; bytes; idx++, bvec_idx++) {
@@ -1262,7 +1261,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 	size_t count = iov_iter_count(iter);
 	loff_t pos = iocb->ki_pos;
 	bool write = iov_iter_rw(iter) == WRITE;
-	bool should_dirty = !write && iter_is_iovec(iter);
+	bool should_dirty = !write && user_backed_iter(iter);

 	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
 		return -EROFS;
fs/cifs/file.c

@@ -3276,7 +3276,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
 		if (ctx->direct_io) {
 			ssize_t result;

-			result = iov_iter_get_pages_alloc(
+			result = iov_iter_get_pages_alloc2(
 				from, &pagevec, cur_len, &start);
 			if (result < 0) {
 				cifs_dbg(VFS,
@@ -3290,7 +3290,6 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
 				break;
 			}
 			cur_len = (size_t)result;
-			iov_iter_advance(from, cur_len);

 			nr_pages =
 				(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
@@ -4012,7 +4011,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
 		if (ctx->direct_io) {
 			ssize_t result;

-			result = iov_iter_get_pages_alloc(
+			result = iov_iter_get_pages_alloc2(
 					&direct_iov, &pagevec,
 					cur_len, &start);
 			if (result < 0) {
@@ -4028,7 +4027,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
 				break;
 			}
 			cur_len = (size_t)result;
-			iov_iter_advance(&direct_iov, cur_len);

 			rdata = cifs_readdata_direct_alloc(
 					pagevec, cifs_uncached_readv_complete);
@@ -4258,7 +4256,7 @@ static ssize_t __cifs_readv(
 	if (!is_sync_kiocb(iocb))
 		ctx->iocb = iocb;

-	if (iter_is_iovec(to))
+	if (user_backed_iter(to))
 		ctx->should_dirty = true;

 	if (direct) {
fs/cifs/misc.c

@@ -1022,7 +1022,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 	saved_len = count;

 	while (count && npages < max_pages) {
-		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
+		rc = iov_iter_get_pages2(iter, pages, count, max_pages, &start);
 		if (rc < 0) {
 			cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
 			break;
@@ -1034,7 +1034,6 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 			break;
 		}

-		iov_iter_advance(iter, rc);
 		count -= rc;
 		rc += start;
 		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);
fs/direct-io.c

@@ -169,7 +169,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
 	const enum req_op dio_op = dio->opf & REQ_OP_MASK;
 	ssize_t ret;

-	ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
+	ret = iov_iter_get_pages2(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
 				&sdio->from);

 	if (ret < 0 && sdio->blocks_available && dio_op == REQ_OP_WRITE) {
@@ -191,7 +191,6 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
 	}

 	if (ret >= 0) {
-		iov_iter_advance(sdio->iter, ret);
 		ret += sdio->from;
 		sdio->head = 0;
 		sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
@@ -1251,7 +1250,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	spin_lock_init(&dio->bio_lock);
 	dio->refcount = 1;

-	dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
+	dio->should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ;
 	sdio.iter = iter;
 	sdio.final_block_in_request = end >> blkbits;
fs/fuse/dev.c

@@ -730,14 +730,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 		}
 	} else {
 		size_t off;
-		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
+		err = iov_iter_get_pages2(cs->iter, &page, PAGE_SIZE, 1, &off);
 		if (err < 0)
 			return err;
 		BUG_ON(!err);
 		cs->len = err;
 		cs->offset = off;
 		cs->pg = page;
-		iov_iter_advance(cs->iter, err);
 	}

 	return lock_request(cs->req);
@@ -1356,7 +1355,7 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
 	if (!fud)
 		return -EPERM;

-	if (!iter_is_iovec(to))
+	if (!user_backed_iter(to))
 		return -EINVAL;

 	fuse_copy_init(&cs, 1, to);
@@ -1949,7 +1948,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
 	if (!fud)
 		return -EPERM;

-	if (!iter_is_iovec(from))
+	if (!user_backed_iter(from))
 		return -EINVAL;

 	fuse_copy_init(&cs, 0, from);
fs/fuse/file.c

@@ -1414,14 +1414,13 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
 	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
 		unsigned npages;
 		size_t start;
-		ret = iov_iter_get_pages(ii, &ap->pages[ap->num_pages],
+		ret = iov_iter_get_pages2(ii, &ap->pages[ap->num_pages],
 					*nbytesp - nbytes,
 					max_pages - ap->num_pages,
 					&start);
 		if (ret < 0)
 			break;

-		iov_iter_advance(ii, ret);
 		nbytes += ret;

 		ret += start;
@@ -1478,7 +1477,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 		inode_unlock(inode);
 	}

-	io->should_dirty = !write && iter_is_iovec(iter);
+	io->should_dirty = !write && user_backed_iter(iter);
 	while (count) {
 		ssize_t nres;
 		fl_owner_t owner = current->files;
fs/gfs2/file.c

@@ -780,7 +780,7 @@ static inline bool should_fault_in_pages(struct iov_iter *i,

 	if (!count)
 		return false;
-	if (!iter_is_iovec(i))
+	if (!user_backed_iter(i))
 		return false;

 	size = PAGE_SIZE;
fs/hugetlbfs/inode.c

@@ -282,35 +282,6 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 }
 #endif

-static size_t
-hugetlbfs_read_actor(struct page *page, unsigned long offset,
-			struct iov_iter *to, unsigned long size)
-{
-	size_t copied = 0;
-	int i, chunksize;
-
-	/* Find which 4k chunk and offset with in that chunk */
-	i = offset >> PAGE_SHIFT;
-	offset = offset & ~PAGE_MASK;
-
-	while (size) {
-		size_t n;
-		chunksize = PAGE_SIZE;
-		if (offset)
-			chunksize -= offset;
-		if (chunksize > size)
-			chunksize = size;
-		n = copy_page_to_iter(&page[i], offset, chunksize, to);
-		copied += n;
-		if (n != chunksize)
-			return copied;
-		offset = 0;
-		size -= chunksize;
-		i++;
-	}
-	return copied;
-}
-
 /*
  * Support for read() - Find the page attached to f_mapping and copy out the
  * data. This provides functionality similar to filemap_read().
@@ -360,7 +331,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 		/*
 		 * We have the page, copy it to user space buffer.
 		 */
-		copied = hugetlbfs_read_actor(page, offset, to, nr);
+		copied = copy_page_to_iter(page, offset, nr, to);
 		put_page(page);
 	}
 	offset += copied;
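hugetlbfs_read_actor() existed only because copy_page_to_iter() could not be handed a compound page, forcing the filesystem to walk the huge page in PAGE_SIZE steps itself. After "hugetlbfs: copy_page_to_iter() can deal with compound pages" the loop collapses into the single call seen above; schematically (a sketch, with short-copy handling elided):

	/* old: chunk the compound page by hand */
	i = offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;
	while (size) {
		size_t chunk = min_t(size_t, size, PAGE_SIZE - offset);
		copied += copy_page_to_iter(&page[i], offset, chunk, to);
		offset = 0;
		size -= chunk;
		i++;
	}

	/* new: copy_page_to_iter() walks the subpages internally */
	copied = copy_page_to_iter(page, offset, size, to);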
fs/iomap/direct-io.c

@@ -533,7 +533,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 			iomi.flags |= IOMAP_NOWAIT;
 		}

-		if (iter_is_iovec(iter))
+		if (user_backed_iter(iter))
 			dio->flags |= IOMAP_DIO_DIRTY;
 	} else {
 		iomi.flags |= IOMAP_WRITE;
fs/nfs/direct.c

@@ -364,13 +364,12 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 		size_t pgbase;
 		unsigned npages, i;

-		result = iov_iter_get_pages_alloc(iter, &pagevec,
+		result = iov_iter_get_pages_alloc2(iter, &pagevec,
 						  rsize, &pgbase);
 		if (result < 0)
 			break;

 		bytes = result;
-		iov_iter_advance(iter, bytes);
 		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 		for (i = 0; i < npages; i++) {
 			struct nfs_page *req;
@@ -478,7 +477,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
 	if (!is_sync_kiocb(iocb))
 		dreq->iocb = iocb;

-	if (iter_is_iovec(iter))
+	if (user_backed_iter(iter))
 		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

 	if (!swap)
@@ -812,13 +811,12 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 		size_t pgbase;
 		unsigned npages, i;

-		result = iov_iter_get_pages_alloc(iter, &pagevec,
+		result = iov_iter_get_pages_alloc2(iter, &pagevec,
 						  wsize, &pgbase);
 		if (result < 0)
 			break;

 		bytes = result;
-		iov_iter_advance(iter, bytes);
 		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 		for (i = 0; i < npages; i++) {
 			struct nfs_page *req;
fs/read_write.c

@@ -378,14 +378,13 @@ EXPORT_SYMBOL(rw_verify_area);

 static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
-	struct iovec iov = { .iov_base = buf, .iov_len = len };
 	struct kiocb kiocb;
 	struct iov_iter iter;
 	ssize_t ret;

 	init_sync_kiocb(&kiocb, filp);
 	kiocb.ki_pos = (ppos ? *ppos : 0);
-	iov_iter_init(&iter, READ, &iov, 1, len);
+	iov_iter_ubuf(&iter, READ, buf, len);

 	ret = call_read_iter(filp, &kiocb, &iter);
 	BUG_ON(ret == -EIOCBQUEUED);
@@ -481,14 +480,13 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)

 static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
 {
-	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
 	struct kiocb kiocb;
 	struct iov_iter iter;
 	ssize_t ret;

 	init_sync_kiocb(&kiocb, filp);
 	kiocb.ki_pos = (ppos ? *ppos : 0);
-	iov_iter_init(&iter, WRITE, &iov, 1, len);
+	iov_iter_ubuf(&iter, WRITE, (void __user *)buf, len);

 	ret = call_write_iter(filp, &kiocb, &iter);
 	BUG_ON(ret == -EIOCBQUEUED);
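new_sync_read() and new_sync_write() are the motivating users of ITER_UBUF: a plain read(2)/write(2) always passes exactly one user buffer, so wrapping it in a one-element iovec array just to build an ITER_IOVEC was pure overhead on the hottest syscall path. iov_iter_ubuf() describes the single buffer directly, as in the hunks above:

	struct iov_iter iter;

	/* one user buffer, no struct iovec required */
	iov_iter_ubuf(&iter, READ, buf, len);	/* buf is a void __user * */
	ret = call_read_iter(filp, &kiocb, &iter);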
fs/splice.c (54 lines changed)

@@ -301,11 +301,9 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
 {
 	struct iov_iter to;
 	struct kiocb kiocb;
-	unsigned int i_head;
 	int ret;

 	iov_iter_pipe(&to, READ, pipe, len);
-	i_head = to.head;
 	init_sync_kiocb(&kiocb, in);
 	kiocb.ki_pos = *ppos;
 	ret = call_read_iter(in, &kiocb, &to);
@@ -313,9 +311,8 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
 		*ppos = kiocb.ki_pos;
 		file_accessed(in);
 	} else if (ret < 0) {
-		to.head = i_head;
-		to.iov_offset = 0;
-		iov_iter_advance(&to, 0); /* to free what was emitted */
+		/* free what was emitted */
+		pipe_discard_from(pipe, to.start_head);
 		/*
 		 * callers of ->splice_read() expect -EAGAIN on
 		 * "can't put anything in there", rather than -EFAULT.
@@ -1161,39 +1158,40 @@ static int iter_to_pipe(struct iov_iter *from,
 	};
 	size_t total = 0;
 	int ret = 0;
-	bool failed = false;

-	while (iov_iter_count(from) && !failed) {
+	while (iov_iter_count(from)) {
 		struct page *pages[16];
-		ssize_t copied;
+		ssize_t left;
 		size_t start;
-		int n;
+		int i, n;

-		copied = iov_iter_get_pages(from, pages, ~0UL, 16, &start);
-		if (copied <= 0) {
-			ret = copied;
+		left = iov_iter_get_pages2(from, pages, ~0UL, 16, &start);
+		if (left <= 0) {
+			ret = left;
 			break;
 		}

-		for (n = 0; copied; n++, start = 0) {
-			int size = min_t(int, copied, PAGE_SIZE - start);
-			if (!failed) {
-				buf.page = pages[n];
-				buf.offset = start;
-				buf.len = size;
-				ret = add_to_pipe(pipe, &buf);
-				if (unlikely(ret < 0)) {
-					failed = true;
-				} else {
-					iov_iter_advance(from, ret);
-					total += ret;
-				}
-			} else {
-				put_page(pages[n]);
+		n = DIV_ROUND_UP(left + start, PAGE_SIZE);
+		for (i = 0; i < n; i++) {
+			int size = min_t(int, left, PAGE_SIZE - start);
+
+			buf.page = pages[i];
+			buf.offset = start;
+			buf.len = size;
+			ret = add_to_pipe(pipe, &buf);
+			if (unlikely(ret < 0)) {
+				iov_iter_revert(from, left);
+				// this one got dropped by add_to_pipe()
+				while (++i < n)
+					put_page(pages[i]);
+				goto out;
 			}
-			copied -= size;
+			total += ret;
+			left -= size;
+			start = 0;
 		}
 	}
+out:
 	return total ? total : ret;
 }
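The iter_to_pipe() rewrite shows what the advancing variants buy on error paths: the old code threaded a failed flag through the loop purely so it could keep iterating to drop page references, while the new code reverts the not-yet-consumed left bytes in one call and releases the leftover references on the spot. Note the refcount subtlety it preserves: on failure, add_to_pipe() has already dropped the reference on the current page, so the cleanup loop starts at ++i. In outline (condensed from the hunk above):

	ret = add_to_pipe(pipe, &buf);
	if (unlikely(ret < 0)) {
		iov_iter_revert(from, left);	/* un-consume what isn't in the pipe */
		while (++i < n)			/* pages[i]'s own ref was dropped by add_to_pipe() */
			put_page(pages[i]);
		goto out;
	}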
include/linux/pipe_fs_i.h

@@ -156,26 +156,6 @@ static inline bool pipe_full(unsigned int head, unsigned int tail,
 	return pipe_occupancy(head, tail) >= limit;
 }

-/**
- * pipe_space_for_user - Return number of slots available to userspace
- * @head: The pipe ring head pointer
- * @tail: The pipe ring tail pointer
- * @pipe: The pipe info structure
- */
-static inline unsigned int pipe_space_for_user(unsigned int head, unsigned int tail,
-					       struct pipe_inode_info *pipe)
-{
-	unsigned int p_occupancy, p_space;
-
-	p_occupancy = pipe_occupancy(head, tail);
-	if (p_occupancy >= pipe->max_usage)
-		return 0;
-	p_space = pipe->ring_size - p_occupancy;
-	if (p_space > pipe->max_usage)
-		p_space = pipe->max_usage;
-	return p_space;
-}
-
 /**
  * pipe_buf_get - get a reference to a pipe_buffer
  * @pipe: the pipe that the buffer belongs to
Some files were not shown because too many files have changed in this diff.