/*
* Network block device - make block devices work over TCP
*
* Note that you can not swap over this thing, yet. Seems to work but
* deadlocks sometimes - you can not swap over TCP in general.
*
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

struct nbd_device {
	u32 flags;
	struct socket *sock;	/* If == NULL, device is not ready yet */
	int magic;

	spinlock_t queue_lock;
	struct list_head queue_head;	/* Requests waiting result */
	struct request *active_req;
	wait_queue_head_t active_wq;
	struct list_head waiting_queue;	/* Requests to be sent */
	wait_queue_head_t waiting_wq;

	struct mutex tx_lock;
	struct gendisk *disk;
	int blksize;
	loff_t bytesize;
	int xmit_timeout;
	bool timedout;
	bool disconnect; /* a disconnect has been requested by user */

	struct timer_list timeout_timer;
	/* protects initialization and shutdown of the socket */
	spinlock_t sock_lock;
	struct task_struct *task_recv;
	struct task_struct *task_send;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif
#define nbd_name(nbd) ((nbd)->disk->disk_name)
#define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
/*
* Use just one lock (or at most 1 per NIC). Two arguments for this:
* 1. Each NIC is essentially a synchronization point for all servers
* accessed through that NIC so there's no need to have more locks
* than NICs anyway.
* 2. More locks lead to more "Dirty cache line bouncing" which will slow
* down each lock to the point where they're actually slower than just
* a single lock.
* Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
*/
static DEFINE_SPINLOCK(nbd_lock);
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}
static bool nbd_is_connected(struct nbd_device *nbd)
{
return !!nbd->task_recv;
}
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}
static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
bdev->bd_inode->i_size = 0;
set_capacity(nbd->disk, 0);
kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
return 0;
}
static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
if (!nbd_is_connected(nbd))
return;
bdev->bd_inode->i_size = nbd->bytesize;
set_capacity(nbd->disk, nbd->bytesize >> 9);
kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}
static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
int blocksize, int nr_blocks)
{
int ret;
ret = set_blocksize(bdev, blocksize);
if (ret)
return ret;
nbd->blksize = blocksize;
nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;
nbd_size_update(nbd, bdev);
return 0;
}
static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
		error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request_all(req, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	spin_lock_irq(&nbd->sock_lock);

	if (!nbd->sock) {
		spin_unlock_irq(&nbd->sock_lock);
		return;
	}

	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
	kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
	sockfd_put(nbd->sock);
	nbd->sock = NULL;
	spin_unlock_irq(&nbd->sock_lock);

	del_timer(&nbd->timeout_timer);
}
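
/*
 * Timer callback for the transmit timeout. It does not complete
 * requests itself; it marks the device as timed out and shuts the
 * socket down, so the blocked sender/receiver notice the error and
 * tear the connection down in process context.
 */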
static void nbd_xmit_timeout(unsigned long arg)
{
	struct nbd_device *nbd = (struct nbd_device *)arg;
	unsigned long flags;

	if (list_empty(&nbd->queue_head))
		return;

	spin_lock_irqsave(&nbd->sock_lock, flags);

	nbd->timedout = true;

	if (nbd->sock)
		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);

	spin_unlock_irqrestore(&nbd->sock_lock, flags);

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
}
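
/*
 * All control and data traffic funnels through sock_xmit(), which
 * loops until the whole buffer has been transferred. It runs with
 * PF_MEMALLOC set and GFP_NOIO | __GFP_MEMALLOC socket allocations,
 * so that socket memory can still be allocated from emergency
 * reserves under memory pressure (the swap-over-nbd case the header
 * comment worries about).
 */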
/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	if (!send && nbd->xmit_timeout)
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	return result;
}
static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
		int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}
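
/*
 * On-the-wire request header, as laid out by struct nbd_request in
 * <linux/nbd.h> (all multi-byte fields big-endian, 28 bytes total,
 * see the BUILD_BUG_ON() in nbd_init()):
 *
 *	__be32 magic;      NBD_REQUEST_MAGIC
 *	__be32 type;       NBD_CMD_READ/WRITE/DISC/FLUSH/TRIM
 *	char   handle[8];  opaque cookie echoed back in the reply;
 *	                   this driver stores the request pointer here
 *	__be64 from;       starting byte offset
 *	__be32 len;        length in bytes
 */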
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	u32 type;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		type = NBD_CMD_DISC;
	else if (req_op(req) == REQ_OP_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req_op(req) == REQ_OP_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);

	if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &req, sizeof(req));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, 1, &request, sizeof(request),
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(bvec, iter))
				flags = MSG_MORE;
			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			result = sock_send_bvec(nbd, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
		}
	}
	return 0;
}
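
/*
 * Map a reply back to the request whose pointer was stashed in the
 * handle. The wait below covers the window in which nbd_handle_req()
 * has sent a request (it is still ->active_req) but has not yet put
 * it on queue_head.
 */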
static struct request *nbd_find_request(struct nbd_device *nbd,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
	if (unlikely(err))
		return ERR_PTR(err);

	spin_lock(&nbd->queue_lock);
	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&nbd->queue_lock);
		return req;
	}
	spin_unlock(&nbd->queue_lock);

	return ERR_PTR(-ENOENT);
}
static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}
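
/*
 * On-the-wire reply header, as laid out by struct nbd_reply in
 * <linux/nbd.h>:
 *
 *	__be32 magic;      NBD_REPLY_MAGIC
 *	__be32 error;      0 on success
 *	char   handle[8];  handle copied from the originating request
 *
 * For a successful read, the payload follows the header on the socket.
 */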
/* An ERR_PTR return means something went wrong; inform userspace */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	req = nbd_find_request(nbd, *(struct request **)reply.handle);
	if (IS_ERR(req)) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			return ERR_PTR(result);

		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
			reply.handle);
		return ERR_PTR(-EBADR);
	}

	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return req;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return req;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
	return req;
}
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};
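
/*
 * Receive loop, run in the context of the process blocked in
 * NBD_DO_IT. It keeps completing replies until nbd_read_stat() fails,
 * which is also how a timeout or NBD_CLEAR_SOCK is noticed: the
 * socket shutdown makes the pending receive error out.
 */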
static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
{
	struct request *req;
	int ret;

	BUG_ON(nbd->magic != NBD_MAGIC);

	sk_set_memalloc(nbd->sock->sk);

	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		return ret;
	}

	nbd_size_update(nbd, bdev);

	while (1) {
		req = nbd_read_stat(nbd);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		nbd_end_request(nbd, req);
	}

	nbd_size_clear(nbd, bdev);

	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);

	return ret;
}
static void nbd_clear_que(struct nbd_device *nbd)
{
	struct request *req;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/*
	 * Because we have set nbd->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(nbd->sock);
	BUG_ON(nbd->active_req);

	while (!list_empty(&nbd->queue_head)) {
		req = list_entry(nbd->queue_head.next, struct request,
				queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}

	while (!list_empty(&nbd->waiting_queue)) {
		req = list_entry(nbd->waiting_queue.next, struct request,
				queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
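
/*
 * Push one request from the waiting queue out to the server under
 * tx_lock. While its header and data are being written to the socket
 * the request is held in ->active_req; afterwards it is parked on
 * queue_head until the matching reply arrives.
 */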
static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS)
		goto error_out;

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err(disk_to_dev(nbd->disk),
			"Write on read-only\n");
		goto error_out;
	}

	req->errors = 0;

	mutex_lock(&nbd->tx_lock);
	if (unlikely(!nbd->sock)) {
		mutex_unlock(&nbd->tx_lock);
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	nbd->active_req = req;

	if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head))
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	if (nbd_send_req(nbd, req) != 0) {
		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(nbd, req);
	} else {
		spin_lock(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->queue_head);
		spin_unlock(&nbd->queue_lock);
	}

	nbd->active_req = NULL;
	mutex_unlock(&nbd->tx_lock);
	wake_up_all(&nbd->active_wq);

	return;

error_out:
	req->errors++;
	nbd_end_request(nbd, req);
}
static int nbd_thread_send(void *data)
{
	struct nbd_device *nbd = data;
	struct request *req;

	nbd->task_send = current;

	set_user_nice(current, MIN_NICE);
	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(nbd->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&nbd->waiting_queue));

		/* extract request */
		if (list_empty(&nbd->waiting_queue))
			continue;

		spin_lock_irq(&nbd->queue_lock);
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&nbd->queue_lock);

		/* handle request */
		nbd_handle_req(nbd, req);
	}

	nbd->task_send = NULL;

	return 0;
}
/*
 * We always wait for the result of a write, for now. It would be nice
 * to make it optional in the future:
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */
static void nbd_request_handler(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct nbd_device *nbd;

		spin_unlock_irq(q->queue_lock);

		nbd = req->rq_disk->private_data;

		BUG_ON(nbd->magic != NBD_MAGIC);

		dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
			req, req->cmd_type);

		if (unlikely(!nbd->sock)) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Attempted send on closed socket\n");
			req->errors++;
			nbd_end_request(nbd, req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		spin_lock_irq(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->waiting_queue);
		spin_unlock_irq(&nbd->queue_lock);

		wake_up(&nbd->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}
static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
{
	int ret = 0;

	spin_lock_irq(&nbd->sock_lock);

	if (nbd->sock) {
		ret = -EBUSY;
		goto out;
	}

	nbd->sock = sock;

out:
	spin_unlock_irq(&nbd->sock_lock);

	return ret;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	nbd->disconnect = false;
	nbd->timedout = false;
	nbd->blksize = 1024;
	nbd->bytesize = 0;
	set_capacity(nbd->disk, 0);
	nbd->flags = 0;
	nbd->xmit_timeout = 0;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	del_timer_sync(&nbd->timeout_timer);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}
static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
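
/*
 * The ioctls below are driven by the userspace client (typically
 * nbd-client), roughly in this order: NBD_SET_SOCK hands in a
 * connected socket, NBD_SET_BLKSIZE/NBD_SET_SIZE/NBD_SET_SIZE_BLOCKS
 * size the device, NBD_SET_FLAGS mirrors the server's flags, and
 * NBD_DO_IT then blocks in the kernel for the lifetime of the
 * connection, running the receive loop.
 */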
/* Must be called with tx_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		struct request sreq;

		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->tx_lock);
		blk_rq_init(NULL, &sreq);
		sreq.cmd_type = REQ_TYPE_DRV_PRIV;

		/* Check again after getting mutex back.  */
		if (!nbd->sock)
			return -EINVAL;

		nbd->disconnect = true;

		nbd_send_req(nbd, &sreq);
		return 0;
	}

	case NBD_CLEAR_SOCK:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		BUG_ON(!list_empty(&nbd->queue_head));
		BUG_ON(!list_empty(&nbd->waiting_queue));
		kill_bdev(bdev);
		return 0;

	case NBD_SET_SOCK: {
		int err;
		struct socket *sock = sockfd_lookup(arg, &err);

		if (!sock)
			return err;

		err = nbd_set_socket(nbd, sock);
		if (!err && max_part)
			bdev->bd_invalidated = 1;

		return err;
	}

	case NBD_SET_BLKSIZE: {
		loff_t bsize = div_s64(nbd->bytesize, arg);

		return nbd_size_set(nbd, bdev, arg, bsize);
	}

	case NBD_SET_SIZE:
		return nbd_size_set(nbd, bdev, nbd->blksize,
				    arg / nbd->blksize);

	case NBD_SET_SIZE_BLOCKS:
		return nbd_size_set(nbd, bdev, nbd->blksize, arg);

	case NBD_SET_TIMEOUT:
		nbd->xmit_timeout = arg * HZ;
		if (arg)
			mod_timer(&nbd->timeout_timer,
				  jiffies + nbd->xmit_timeout);
		else
			del_timer_sync(&nbd->timeout_timer);

		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_DO_IT: {
		struct task_struct *thread;
		int error;

		if (nbd->task_recv)
			return -EBUSY;
		if (!nbd->sock)
			return -EINVAL;

		/* We have to claim the device under the lock */
		nbd->task_recv = current;
		mutex_unlock(&nbd->tx_lock);

		nbd_parse_flags(nbd, bdev);

		thread = kthread_run(nbd_thread_send, nbd, "%s",
				     nbd_name(nbd));
		if (IS_ERR(thread)) {
			mutex_lock(&nbd->tx_lock);
			nbd->task_recv = NULL;
			return PTR_ERR(thread);
		}

		nbd_dev_dbg_init(nbd);
		error = nbd_thread_recv(nbd, bdev);
		nbd_dev_dbg_close(nbd);
		kthread_stop(thread);

		mutex_lock(&nbd->tx_lock);
		nbd->task_recv = NULL;

		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		nbd_bdev_reset(bdev);

		if (nbd->disconnect) /* user requested, ignore socket errors */
			error = 0;
		if (nbd->timedout)
			error = -ETIMEDOUT;

		nbd_reset(nbd);

		return error;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only.  The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		dev_info(disk_to_dev(nbd->disk),
			"next = %p, prev = %p, head = %p\n",
			nbd->queue_head.next, nbd->queue_head.prev,
			&nbd->queue_head);
		return 0;
	}
	return -ENOTTY;
}
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->tx_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->tx_lock);

	return error;
}
static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};
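
/*
 * Per-device debugfs entries, typically visible under
 * /sys/kernel/debug/nbd/<disk_name>/: tasks, size_bytes, timeout,
 * blocksize and flags, all read-only.
 */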
#if IS_ENABLED(CONFIG_DEBUG_FS)
static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
struct nbd_device *nbd = s->private;
if (nbd->task_recv)
seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
if (nbd->task_send)
seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));
return 0;
}
static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}
static const struct file_operations nbd_dbg_tasks_ops = {
.open = nbd_dbg_tasks_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
struct nbd_device *nbd = s->private;
u32 flags = nbd->flags;
seq_printf(s, "Hex: 0x%08x\n\n", flags);
seq_puts(s, "Known flags:\n");
if (flags & NBD_FLAG_HAS_FLAGS)
seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
if (flags & NBD_FLAG_READ_ONLY)
seq_puts(s, "NBD_FLAG_READ_ONLY\n");
if (flags & NBD_FLAG_SEND_FLUSH)
seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
if (flags & NBD_FLAG_SEND_TRIM)
seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
return 0;
}
static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
return single_open(file, nbd_dbg_flags_show, inode->i_private);
}
static const struct file_operations nbd_dbg_flags_ops = {
.open = nbd_dbg_flags_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
struct dentry *dir;
	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
	debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
return 0;
}
static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
debugfs_remove_recursive(nbd->dbg_dir);
}
static int nbd_dbg_init(void)
{
struct dentry *dbg_dir;
dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

nbd_dbg_dir = dbg_dir;
return 0;
}
static void nbd_dbg_close(void)
{
debugfs_remove_recursive(nbd_dbg_dir);
}
#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
return 0;
}
static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}
static int nbd_dbg_init(void)
{
return 0;
}
static void nbd_dbg_close(void)
{
}
#endif
/*
 * And here should be modules and kernel interface
 * (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partition kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
		spin_lock_init(&nbd_dev[i].queue_lock);
		spin_lock_init(&nbd_dev[i].sock_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_timer(&nbd_dev[i].timeout_timer);
		nbd_dev[i].timeout_timer.function = nbd_xmit_timeout;
		nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
		init_waitqueue_head(&nbd_dev[i].active_wq);
		init_waitqueue_head(&nbd_dev[i].waiting_wq);
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		nbd_reset(&nbd_dev[i]);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}
static void __exit nbd_cleanup(void)
{
	int i;

	nbd_dbg_close();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);
module_exit(nbd_cleanup);
MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");
module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
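
/*
 * Example (parameter values chosen for illustration): load with
 * sixteen devices, each supporting up to fifteen partitions:
 *
 *	modprobe nbd nbds_max=16 max_part=15
 */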