Merge branch 'for-4.7/drivers' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
"On top of the core pull request, this is the drivers pull request for
this merge window. This contains:
- Switch drivers to the new write back cache API, and kill off the
flush flags. From me.
- Kill the discard support for the STEC pci-e flash driver. It's
trivially broken, and apparently unmaintained, so it's safer to
just remove it. From Jeff Moyer.
- A set of lightnvm updates from the usual suspects (Matias/Javier,
and Simon), and fixes from Arnd, Jeff Mahoney, Sagi, and Wenwei
Tao.
- A set of updates for NVMe:
- Turn the controller state management into a proper state
machine. From Christoph.
- Shuffling of code in preparation for NVMe-over-fabrics, also
from Christoph.
- Cleanup of the command prep part from Ming Lin.
- Rewrite of the discard support from Ming Lin.
- Deadlock fix for namespace removal from Ming Lin.
- Use the now exported blk-mq tag helper for IO termination.
From Sagi.
- Various little fixes from Christoph, Guilherme, Keith, Ming
Lin, Wang Sheng-Hui.
- Convert mtip32xx to use the now exported blk-mq tag iter function,
from Keith"
* 'for-4.7/drivers' of git://git.kernel.dk/linux-block: (74 commits)
lightnvm: reserved space calculation incorrect
lightnvm: rename nr_pages to nr_ppas on nvm_rq
lightnvm: add is_cached entry to struct ppa_addr
lightnvm: expose gennvm_mark_blk to targets
lightnvm: remove mgt targets on mgt removal
lightnvm: pass dma address to hardware rather than pointer
lightnvm: do not assume sequential lun alloc.
nvme/lightnvm: Log using the ctrl named device
lightnvm: rename dma helper functions
lightnvm: enable metadata to be sent to device
lightnvm: do not free unused metadata on rrpc
lightnvm: fix out of bound ppa lun id on bb tbl
lightnvm: refactor set_bb_tbl for accepting ppa list
lightnvm: move responsibility for bad blk mgmt to target
lightnvm: make nvm_set_rqd_ppalist() aware of vblks
lightnvm: remove struct factory_blks
lightnvm: refactor device ops->get_bb_tbl()
lightnvm: introduce nvm_for_each_lun_ppa() macro
lightnvm: refactor dev->online_target to global nvm_targets
lightnvm: rename nvm_targets to nvm_tgt_type
...
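The driver-visible core of this series is the replacement of blk_queue_flush() with blk_queue_write_cache(), which the hunks below apply tree-wide. As a quick orientation, a hedged before/after sketch for a hypothetical driver's queue setup (the mydrv_* name is illustrative, not from this diff):

        #include <linux/blkdev.h>

        /* Sketch: advertise a volatile write cache, 4.7-era API. */
        static void mydrv_setup_cache(struct request_queue *q, bool has_fua)
        {
                /* Old interface, removed by this series:
                 *      blk_queue_flush(q, REQ_FLUSH | (has_fua ? REQ_FUA : 0));
                 */

                /* New interface: wc = volatile write cache present (the
                 * driver must service empty REQ_FLUSH requests), fua =
                 * device honors REQ_FUA on individual writes.
                 */
                blk_queue_write_cache(q, true, has_fua);
        }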
Documentation/block/writeback_cache_control.txt
@@ -71,7 +71,7 @@ requests that have a payload. For devices with volatile write caches the
 driver needs to tell the block layer that it supports flushing caches by
 doing:
 
-        blk_queue_flush(sdkp->disk->queue, REQ_FLUSH);
+        blk_queue_write_cache(sdkp->disk->queue, true, false);
 
 and handle empty REQ_FLUSH requests in its prep_fn/request_fn. Note that
 REQ_FLUSH requests with a payload are automatically turned into a sequence
@@ -79,7 +79,7 @@ of an empty REQ_FLUSH request followed by the actual write by the block
 layer. For devices that also support the FUA bit the block layer needs
 to be told to pass through the REQ_FUA bit using:
 
-        blk_queue_flush(sdkp->disk->queue, REQ_FLUSH | REQ_FUA);
+        blk_queue_write_cache(sdkp->disk->queue, true, true);
 
 and the driver must handle write requests that have the REQ_FUA bit set
 in prep_fn/request_fn. If the FUA bit is not natively supported the block
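The documentation above is the contract the driver conversions below implement. For a legacy (non-blk-mq) driver, honoring it looks roughly like this sketch (4.7-era API; the mydrv_issue_* helpers are hypothetical):

        /* Sketch of a request_fn obeying the cache-control rules above. */
        static void mydrv_request_fn(struct request_queue *q)
        {
                struct request *rq;

                while ((rq = blk_fetch_request(q)) != NULL) {
                        if ((rq->cmd_flags & REQ_FLUSH) && !blk_rq_sectors(rq)) {
                                /* Empty flush: drain the volatile write cache. */
                                mydrv_issue_cache_flush(rq);
                                continue;
                        }
                        /* Only reached with REQ_FUA set if the driver passed
                         * fua=true to blk_queue_write_cache(). */
                        mydrv_issue_rw(rq, rq->cmd_flags & REQ_FUA);
                }
        }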
arch/um/drivers/ubd_kern.c
@@ -862,7 +862,7 @@ static int ubd_add(int n, char **error_out)
                 goto out;
         }
         ubd_dev->queue->queuedata = ubd_dev;
-        blk_queue_flush(ubd_dev->queue, REQ_FLUSH);
+        blk_queue_write_cache(ubd_dev->queue, true, false);
 
         blk_queue_max_segments(ubd_dev->queue, MAX_SG);
         err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
block/blk-core.c (+2 -1)
@@ -1964,7 +1964,8 @@ generic_make_request_checks(struct bio *bio)
          * drivers without flush support don't have to worry
          * about them.
          */
-        if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+        if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+            !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
                 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
                 if (!nr_sectors) {
                         err = 0;
block/blk-flush.c (+6 -5)
@@ -95,17 +95,18 @@ enum {
 static bool blk_kick_flush(struct request_queue *q,
                            struct blk_flush_queue *fq);
 
-static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
+static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
 {
         unsigned int policy = 0;
 
         if (blk_rq_sectors(rq))
                 policy |= REQ_FSEQ_DATA;
 
-        if (fflags & REQ_FLUSH) {
+        if (fflags & (1UL << QUEUE_FLAG_WC)) {
                 if (rq->cmd_flags & REQ_FLUSH)
                         policy |= REQ_FSEQ_PREFLUSH;
-                if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+                if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
+                    (rq->cmd_flags & REQ_FUA))
                         policy |= REQ_FSEQ_POSTFLUSH;
         }
         return policy;
@@ -384,7 +385,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
 void blk_insert_flush(struct request *rq)
 {
         struct request_queue *q = rq->q;
-        unsigned int fflags = q->flush_flags;        /* may change, cache */
+        unsigned long fflags = q->queue_flags;        /* may change, cache */
         unsigned int policy = blk_flush_policy(fflags, rq);
         struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
@@ -393,7 +394,7 @@ void blk_insert_flush(struct request *rq)
          * REQ_FLUSH and FUA for the driver.
          */
         rq->cmd_flags &= ~REQ_FLUSH;
-        if (!(fflags & REQ_FUA))
+        if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
                 rq->cmd_flags &= ~REQ_FUA;
 
         /*
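Because blk_flush_policy() now receives a snapshot of q->queue_flags instead of a dedicated flush mask, capability checks turn into bit tests against QUEUE_FLAG_WC and QUEUE_FLAG_FUA. A minimal sketch of the equivalence (illustrative, not a hunk from this merge):

        /* Testing a cached copy of the queue flags; the open-coded shift
         * mirrors what blk_flush_policy() does with its snapshot. */
        unsigned long fflags = q->queue_flags;        /* may change, cache */
        bool has_wc  = fflags & (1UL << QUEUE_FLAG_WC);  /* was: fflags & REQ_FLUSH */
        bool has_fua = fflags & (1UL << QUEUE_FLAG_FUA); /* was: fflags & REQ_FUA */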
block/blk-mq-tag.c (+2 -3)
@@ -464,15 +464,14 @@ static void bt_tags_for_each(struct blk_mq_tags *tags,
         }
 }
 
-void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
-                void *priv)
+static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
+                busy_tag_iter_fn *fn, void *priv)
 {
         if (tags->nr_reserved_tags)
                 bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
         bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
                         false);
 }
-EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                 busy_tag_iter_fn *fn, void *priv)
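With blk_mq_all_tag_busy_iter() made static, blk_mq_tagset_busy_iter() is the exported way to walk in-flight requests, taking the tag set rather than a raw tags pointer; the mtip32xx and NVMe hunks below rely on it. A hedged usage sketch (the mydrv_* names are illustrative):

        /* Illustrative callback matching busy_tag_iter_fn. */
        static void mydrv_count_busy(struct request *rq, void *data, bool reserved)
        {
                unsigned int *busy = data;

                (*busy)++;
        }

        static unsigned int mydrv_busy_requests(struct blk_mq_tag_set *set)
        {
                unsigned int busy = 0;

                /* Iterates every started request across the whole tag set. */
                blk_mq_tagset_busy_iter(set, mydrv_count_busy, &busy);
                return busy;
        }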
block/blk-settings.c (+10 -28)
@@ -820,29 +820,14 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
-/**
- * blk_queue_flush - configure queue's cache flush capability
- * @q:          the request queue for the device
- * @flush:      0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
- *
- * Tell block layer cache flush capability of @q. If it supports
- * flushing, REQ_FLUSH should be set. If it supports bypassing
- * write cache for individual writes, REQ_FUA should be set.
- */
-void blk_queue_flush(struct request_queue *q, unsigned int flush)
-{
-        WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
-
-        if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
-                flush &= ~REQ_FUA;
-
-        q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush);
-
 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
-        q->flush_not_queueable = !queueable;
+        spin_lock_irq(q->queue_lock);
+        if (queueable)
+                clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+        else
+                set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+        spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
@@ -857,16 +842,13 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
 {
         spin_lock_irq(q->queue_lock);
-        if (wc) {
+        if (wc)
                 queue_flag_set(QUEUE_FLAG_WC, q);
-                q->flush_flags = REQ_FLUSH;
-        } else
+        else
                 queue_flag_clear(QUEUE_FLAG_WC, q);
-        if (fua) {
-                if (wc)
-                        q->flush_flags |= REQ_FUA;
+        if (fua)
                 queue_flag_set(QUEUE_FLAG_FUA, q);
-        } else
+        else
                 queue_flag_clear(QUEUE_FLAG_FUA, q);
         spin_unlock_irq(q->queue_lock);
 }
drivers/block/drbd/drbd_main.c
@@ -2761,7 +2761,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
         q->backing_dev_info.congested_data = device;
 
         blk_queue_make_request(q, drbd_make_request);
-        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+        blk_queue_write_cache(q, true, true);
         /* Setting the max_hw_sectors to an odd value of 8kibyte here
            This triggers a max_bio_size message upon first attach or connect */
         blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
drivers/block/loop.c
@@ -943,7 +943,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
         mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 
         if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
-                blk_queue_flush(lo->lo_queue, REQ_FLUSH);
+                blk_queue_write_cache(lo->lo_queue, true, false);
 
         loop_update_dio(lo);
         set_capacity(lo->lo_disk, size);
drivers/block/mtip32xx/mtip32xx.c
@@ -3000,14 +3000,14 @@ restart_eh:
                                 "Completion workers still active!");
 
                 spin_lock(dd->queue->queue_lock);
-                blk_mq_all_tag_busy_iter(*dd->tags.tags,
+                blk_mq_tagset_busy_iter(&dd->tags,
                                         mtip_queue_cmd, dd);
                 spin_unlock(dd->queue->queue_lock);
 
                 set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
 
                 if (mtip_device_reset(dd))
-                        blk_mq_all_tag_busy_iter(*dd->tags.tags,
+                        blk_mq_tagset_busy_iter(&dd->tags,
                                                 mtip_abort_cmd, dd);
 
                 clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
@@ -4023,12 +4023,6 @@ skip_create_disk:
         blk_queue_io_min(dd->queue, 4096);
         blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);
 
-        /*
-         * write back cache is not supported in the device. FUA depends on
-         * write back cache support, hence setting flush support to zero.
-         */
-        blk_queue_flush(dd->queue, 0);
-
         /* Signal trim support */
         if (dd->trim_supp == true) {
                 set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags);
@@ -4174,7 +4168,7 @@ static int mtip_block_remove(struct driver_data *dd)
 
         blk_mq_freeze_queue_start(dd->queue);
         blk_mq_stop_hw_queues(dd->queue);
-        blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
+        blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
 
         /*
          * Delete our gendisk structure. This also removes the device
drivers/block/nbd.c (+2 -2)
@@ -693,9 +693,9 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
         if (nbd->flags & NBD_FLAG_SEND_TRIM)
                 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
         if (nbd->flags & NBD_FLAG_SEND_FLUSH)
-                blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
+                blk_queue_write_cache(nbd->disk->queue, true, false);
         else
-                blk_queue_flush(nbd->disk->queue, 0);
+                blk_queue_write_cache(nbd->disk->queue, false, false);
 }
 
 static int nbd_dev_dbg_init(struct nbd_device *nbd);
drivers/block/osdblk.c
@@ -437,7 +437,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
         blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
 
         blk_queue_prep_rq(q, blk_queue_start_tag);
-        blk_queue_flush(q, REQ_FLUSH);
+        blk_queue_write_cache(q, true, false);
 
         disk->queue = q;
 
drivers/block/ps3disk.c
@@ -468,7 +468,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
         blk_queue_dma_alignment(queue, dev->blk_size-1);
         blk_queue_logical_block_size(queue, dev->blk_size);
 
-        blk_queue_flush(queue, REQ_FLUSH);
+        blk_queue_write_cache(queue, true, false);
 
         blk_queue_max_segments(queue, -1);
         blk_queue_max_segment_size(queue, dev->bounce_size);
drivers/block/skd_main.c
@@ -133,7 +133,6 @@ MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
 #define SKD_TIMER_MINUTES(minutes)        ((minutes) * (60))
 
 #define INQ_STD_NBYTES 36
-#define SKD_DISCARD_CDB_LENGTH        24
 
 enum skd_drvr_state {
         SKD_DRVR_STATE_LOAD,
@@ -212,7 +211,6 @@ struct skd_request_context {
 
         struct request *req;
         u8 flush_cmd;
-        u8 discard_page;
 
         u32 timeout_stamp;
         u8 sg_data_dir;
@@ -230,7 +228,6 @@ struct skd_request_context {
 };
 #define SKD_DATA_DIR_HOST_TO_CARD        1
 #define SKD_DATA_DIR_CARD_TO_HOST        2
-#define SKD_DATA_DIR_NONE                3        /* especially for DISCARD requests. */
 
 struct skd_special_context {
         struct skd_request_context req;
@@ -540,31 +537,6 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
         scsi_req->cdb[9] = 0;
 }
 
-static void
-skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
-                     struct skd_request_context *skreq,
-                     struct page *page,
-                     u32 lba, u32 count)
-{
-        char *buf;
-        unsigned long len;
-        struct request *req;
-
-        buf = page_address(page);
-        len = SKD_DISCARD_CDB_LENGTH;
-
-        scsi_req->cdb[0] = UNMAP;
-        scsi_req->cdb[8] = len;
-
-        put_unaligned_be16(6 + 16, &buf[0]);
-        put_unaligned_be16(16, &buf[2]);
-        put_unaligned_be64(lba, &buf[8]);
-        put_unaligned_be32(count, &buf[16]);
-
-        req = skreq->req;
-        blk_add_request_payload(req, page, 0, len);
-}
-
 static void skd_request_fn_not_online(struct request_queue *q);
 
 static void skd_request_fn(struct request_queue *q)
@@ -575,7 +547,6 @@ static void skd_request_fn(struct request_queue *q)
         struct skd_request_context *skreq;
         struct request *req = NULL;
         struct skd_scsi_request *scsi_req;
-        struct page *page;
         unsigned long io_flags;
         int error;
         u32 lba;
@@ -669,7 +640,6 @@ static void skd_request_fn(struct request_queue *q)
                 skreq->flush_cmd = 0;
                 skreq->n_sg = 0;
                 skreq->sg_byte_count = 0;
-                skreq->discard_page = 0;
 
                 /*
                  * OK to now dequeue request from q.
@@ -735,18 +705,7 @@ static void skd_request_fn(struct request_queue *q)
                 else
                         skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
 
-                if (io_flags & REQ_DISCARD) {
-                        page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
-                        if (!page) {
-                                pr_err("request_fn:Page allocation failed.\n");
-                                skd_end_request(skdev, skreq, -ENOMEM);
-                                break;
-                        }
-                        skreq->discard_page = 1;
-                        req->completion_data = page;
-                        skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
-
-                } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
+                if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
                         skd_prep_zerosize_flush_cdb(scsi_req, skreq);
                         SKD_ASSERT(skreq->flush_cmd == 1);
@@ -851,16 +810,6 @@ skip_sg:
 static void skd_end_request(struct skd_device *skdev,
                             struct skd_request_context *skreq, int error)
 {
-        struct request *req = skreq->req;
-        unsigned int io_flags = req->cmd_flags;
-
-        if ((io_flags & REQ_DISCARD) &&
-                (skreq->discard_page == 1)) {
-                pr_debug("%s:%s:%d, free the page!",
-                         skdev->name, __func__, __LINE__);
-                __free_page(req->completion_data);
-        }
-
         if (unlikely(error)) {
                 struct request *req = skreq->req;
                 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
@@ -4412,19 +4361,13 @@ static int skd_cons_disk(struct skd_device *skdev)
         disk->queue = q;
         q->queuedata = skdev;
 
-        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+        blk_queue_write_cache(q, true, true);
         blk_queue_max_segments(q, skdev->sgs_per_request);
         blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
 
         /* set sysfs ptimal_io_size to 8K */
         blk_queue_io_opt(q, 8192);
 
-        /* DISCARD Flag initialization. */
-        q->limits.discard_granularity = 8192;
-        q->limits.discard_alignment = 0;
-        blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
-        q->limits.discard_zeroes_data = 1;
-        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
         queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
drivers/block/virtio_blk.c
@@ -493,11 +493,7 @@ static void virtblk_update_cache_mode(struct virtio_device *vdev)
         u8 writeback = virtblk_get_cache_mode(vdev);
         struct virtio_blk *vblk = vdev->priv;
 
-        if (writeback)
-                blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
-        else
-                blk_queue_flush(vblk->disk->queue, 0);
-
+        blk_queue_write_cache(vblk->disk->queue, writeback, false);
         revalidate_disk(vblk->disk);
 }
 
drivers/block/xen-blkback/xenbus.c
@@ -477,7 +477,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
                 vbd->type |= VDISK_REMOVABLE;
 
         q = bdev_get_queue(bdev);
-        if (q && q->flush_flags)
+        if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                 vbd->flush_support = true;
 
         if (q && blk_queue_secdiscard(q))
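The xen-blkback hunk above shows the consumer side of killing q->flush_flags: code that inspects another device's queue now tests the flag bit directly. The same pattern, as a standalone sketch (names illustrative):

        /* Sketch: does a backing device need explicit cache flushes? */
        static bool mydrv_backing_needs_flush(struct block_device *bdev)
        {
                struct request_queue *q = bdev_get_queue(bdev);

                /* was: q && q->flush_flags */
                return q && test_bit(QUEUE_FLAG_WC, &q->queue_flags);
        }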
drivers/block/xen-blkfront.c
@@ -998,7 +998,8 @@ static const char *flush_info(unsigned int feature_flush)
 
 static void xlvbd_flush(struct blkfront_info *info)
 {
-        blk_queue_flush(info->rq, info->feature_flush);
+        blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH,
+                                info->feature_flush & REQ_FUA);
         pr_info("blkfront: %s: %s %s %s %s %s\n",
                 info->gd->disk_name, flush_info(info->feature_flush),
                 "persistent grants:", info->feature_persistent ?
drivers/ide/ide-disk.c
@@ -522,7 +522,7 @@ static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
 static void update_flush(ide_drive_t *drive)
 {
         u16 *id = drive->id;
-        unsigned flush = 0;
+        bool wc = false;
 
         if (drive->dev_flags & IDE_DFLAG_WCACHE) {
                 unsigned long long capacity;
@@ -546,12 +546,12 @@ static void update_flush(ide_drive_t *drive)
                                drive->name, barrier ? "" : "not ");
 
                 if (barrier) {
-                        flush = REQ_FLUSH;
+                        wc = true;
                         blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
                 }
         }
 
-        blk_queue_flush(drive->queue, flush);
+        blk_queue_write_cache(drive->queue, wc, false);
 }
 
 ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
(+250 -120) File diff suppressed because it is too large.
drivers/lightnvm/gennvm.c (+53 -47)
@@ -129,27 +129,25 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
         return 0;
 }
 
-static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
-                                                                void *private)
+static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa,
+                                                        u8 *blks, int nr_blks)
 {
-        struct gen_nvm *gn = private;
         struct nvm_dev *dev = gn->dev;
         struct gen_lun *lun;
         struct nvm_block *blk;
         int i;
 
+        nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+        if (nr_blks < 0)
+                return nr_blks;
+
         lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
 
-        for (i = 0; i < nr_blocks; i++) {
+        for (i = 0; i < nr_blks; i++) {
                 if (blks[i] == 0)
                         continue;
 
                 blk = &lun->vlun.blocks[i];
                 if (!blk) {
                         pr_err("gennvm: BB data is out of bounds.\n");
                         return -EINVAL;
                 }
 
                 list_move_tail(&blk->list, &lun->bb_list);
                 lun->vlun.nr_bad_blocks++;
                 lun->vlun.nr_free_blocks--;
@@ -216,13 +214,21 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
         struct gen_lun *lun;
         struct nvm_block *block;
         sector_t lun_iter, blk_iter, cur_block_id = 0;
-        int ret;
+        int ret, nr_blks;
+        u8 *blks;
+
+        nr_blks = dev->blks_per_lun * dev->plane_mode;
+        blks = kmalloc(nr_blks, GFP_KERNEL);
+        if (!blks)
+                return -ENOMEM;
 
         gennvm_for_each_lun(gn, lun, lun_iter) {
                 lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
                                                         dev->blks_per_lun);
-                if (!lun->vlun.blocks)
+                if (!lun->vlun.blocks) {
+                        kfree(blks);
                         return -ENOMEM;
+                }
 
                 for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
                         block = &lun->vlun.blocks[blk_iter];
@@ -246,14 +252,15 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
 
                         ppa.ppa = 0;
                         ppa.g.ch = lun->vlun.chnl_id;
-                        ppa.g.lun = lun->vlun.id;
-                        ppa = generic_to_dev_addr(dev, ppa);
+                        ppa.g.lun = lun->vlun.lun_id;
 
-                        ret = dev->ops->get_bb_tbl(dev, ppa,
-                                                dev->blks_per_lun,
-                                                gennvm_block_bb, gn);
+                        ret = nvm_get_bb_tbl(dev, ppa, blks);
                         if (ret)
-                                pr_err("gennvm: could not read BB table\n");
+                                pr_err("gennvm: could not get BB table\n");
+
+                        ret = gennvm_block_bb(gn, ppa, blks, nr_blks);
+                        if (ret)
+                                pr_err("gennvm: BB table map failed\n");
                 }
         }
 
@@ -266,6 +273,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
                 }
         }
 
+        kfree(blks);
         return 0;
 }
 
@@ -399,64 +407,60 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
         spin_unlock(&vlun->lock);
 }
 
-static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
-                                                                int type)
+static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
 {
         struct gen_nvm *gn = dev->mp;
         struct gen_lun *lun;
         struct nvm_block *blk;
 
-        if (unlikely(ppa->g.ch > dev->nr_chnls ||
-                        ppa->g.lun > dev->luns_per_chnl ||
-                        ppa->g.blk > dev->blks_per_lun)) {
+        pr_debug("gennvm: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
+                        ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);
+
+        if (unlikely(ppa.g.ch > dev->nr_chnls ||
+                        ppa.g.lun > dev->luns_per_chnl ||
+                        ppa.g.blk > dev->blks_per_lun)) {
                 WARN_ON_ONCE(1);
                 pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
-                                ppa->g.ch, dev->nr_chnls,
-                                ppa->g.lun, dev->luns_per_chnl,
-                                ppa->g.blk, dev->blks_per_lun);
+                                ppa.g.ch, dev->nr_chnls,
+                                ppa.g.lun, dev->luns_per_chnl,
+                                ppa.g.blk, dev->blks_per_lun);
                 return;
         }
 
-        lun = &gn->luns[ppa->g.lun * ppa->g.ch];
-        blk = &lun->vlun.blocks[ppa->g.blk];
+        lun = &gn->luns[ppa.g.lun * ppa.g.ch];
+        blk = &lun->vlun.blocks[ppa.g.blk];
 
         /* will be moved to bb list on put_blk from target */
         blk->state = type;
 }
 
-/* mark block bad. It is expected the target recover from the error. */
+/*
+ * mark block bad in gennvm. It is expected that the target recovers separately
+ */
 static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
-        int i;
-
-        if (!dev->ops->set_bb_tbl)
-                return;
-
-        if (dev->ops->set_bb_tbl(dev, rqd, 1))
-                return;
+        int bit = -1;
+        int max_secs = dev->ops->max_phys_sect;
+        void *comp_bits = &rqd->ppa_status;
 
         nvm_addr_to_generic_mode(dev, rqd);
 
         /* look up blocks and mark them as bad */
-        if (rqd->nr_pages > 1)
-                for (i = 0; i < rqd->nr_pages; i++)
-                        gennvm_blk_set_type(dev, &rqd->ppa_list[i],
-                                                NVM_BLK_ST_BAD);
-        else
-                gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
+        if (rqd->nr_ppas == 1) {
+                gennvm_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
+                return;
+        }
+
+        while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
+                gennvm_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
 }
 
 static void gennvm_end_io(struct nvm_rq *rqd)
 {
         struct nvm_tgt_instance *ins = rqd->ins;
 
-        switch (rqd->error) {
-        case NVM_RSP_SUCCESS:
-        case NVM_RSP_ERR_EMPTYPAGE:
-                break;
-        case NVM_RSP_ERR_FAILWRITE:
+        if (rqd->error == NVM_RSP_ERR_FAILWRITE)
                 gennvm_mark_blk_bad(rqd->dev, rqd);
-        }
 
         ins->tt->end_io(rqd);
 }
@@ -539,6 +543,8 @@ static struct nvmm_type gennvm = {
         .submit_io                = gennvm_submit_io,
         .erase_blk                = gennvm_erase_blk,
 
+        .mark_blk                = gennvm_mark_blk,
+
         .get_lun                = gennvm_get_lun,
         .reserve_lun                = gennvm_reserve_lun,
         .release_lun                = gennvm_release_lun,
drivers/lightnvm/rrpc.c (+20 -22)
@@ -405,9 +405,8 @@ static void rrpc_block_gc(struct work_struct *work)
                                                                         ws_gc);
         struct rrpc *rrpc = gcb->rrpc;
         struct rrpc_block *rblk = gcb->rblk;
+        struct rrpc_lun *rlun = rblk->rlun;
         struct nvm_dev *dev = rrpc->dev;
-        struct nvm_lun *lun = rblk->parent->lun;
-        struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
 
         mempool_free(gcb, rrpc->gcb_pool);
         pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
@@ -508,9 +507,9 @@ static void rrpc_gc_queue(struct work_struct *work)
                                                                         ws_gc);
         struct rrpc *rrpc = gcb->rrpc;
         struct rrpc_block *rblk = gcb->rblk;
-        struct nvm_lun *lun = rblk->parent->lun;
-        struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
+        struct rrpc_lun *rlun = rblk->rlun;
+        struct nvm_block *blk = rblk->parent;
 
         spin_lock(&rlun->lock);
         list_add_tail(&rblk->prio, &rlun->prio_list);
@@ -696,7 +695,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
 {
         struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
         struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
-        uint8_t npages = rqd->nr_pages;
+        uint8_t npages = rqd->nr_ppas;
         sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
 
         if (bio_data_dir(rqd->bio) == WRITE)
@@ -711,8 +710,6 @@ static void rrpc_end_io(struct nvm_rq *rqd)
 
         if (npages > 1)
                 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
-        if (rqd->metadata)
-                nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);
 
         mempool_free(rqd, rrpc->rq_pool);
 }
@@ -886,7 +883,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
         bio_get(bio);
         rqd->bio = bio;
         rqd->ins = &rrpc->instance;
-        rqd->nr_pages = nr_pages;
+        rqd->nr_ppas = nr_pages;
         rrq->flags = flags;
 
         err = nvm_submit_io(rrpc->dev, rqd);
@@ -895,7 +892,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
                 bio_put(bio);
                 if (!(flags & NVM_IOTYPE_GC)) {
                         rrpc_unlock_rq(rrpc, rqd);
-                        if (rqd->nr_pages > 1)
+                        if (rqd->nr_ppas > 1)
                                 nvm_dev_dma_free(rrpc->dev,
                                         rqd->ppa_list, rqd->dma_ppa_list);
                 }
@@ -1039,11 +1036,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
 {
         struct nvm_dev *dev = rrpc->dev;
         sector_t i;
-        u64 slba;
         int ret;
 
-        slba = rrpc->soffset >> (ilog2(dev->sec_size) - 9);
-
         rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
         if (!rrpc->trans_map)
                 return -ENOMEM;
@@ -1065,8 +1059,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
                 return 0;
 
         /* Bring up the mapping table from device */
-        ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_sects, rrpc_l2p_update,
-                                                                        rrpc);
+        ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
+                                        rrpc_l2p_update, rrpc);
         if (ret) {
                 pr_err("nvm: rrpc: could not read L2P table.\n");
                 return -EINVAL;
@@ -1207,10 +1201,6 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 
                 INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
                 spin_lock_init(&rlun->lock);
-
-                rrpc->total_blocks += dev->blks_per_lun;
-                rrpc->nr_sects += dev->sec_per_lun;
-
         }
 
         return 0;
@@ -1224,18 +1214,24 @@ static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
         struct nvm_dev *dev = rrpc->dev;
         struct nvmm_type *mt = dev->mt;
         sector_t size = rrpc->nr_sects * dev->sec_size;
+        int ret;
 
         size >>= 9;
 
-        return mt->get_area(dev, begin, size);
+        ret = mt->get_area(dev, begin, size);
+        if (!ret)
+                *begin >>= (ilog2(dev->sec_size) - 9);
+
+        return ret;
 }
 
 static void rrpc_area_free(struct rrpc *rrpc)
 {
         struct nvm_dev *dev = rrpc->dev;
         struct nvmm_type *mt = dev->mt;
+        sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);
 
-        mt->put_area(dev, rrpc->soffset);
+        mt->put_area(dev, begin);
 }
 
@@ -1268,7 +1264,7 @@ static sector_t rrpc_capacity(void *private)
         sector_t reserved, provisioned;
 
         /* cur, gc, and two emergency blocks for each lun */
-        reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
+        reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
         provisioned = rrpc->nr_sects - reserved;
 
         if (reserved > rrpc->nr_sects) {
@@ -1388,6 +1384,8 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
         INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
 
         rrpc->nr_luns = lun_end - lun_begin + 1;
+        rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
+        rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;
 
         /* simple round-robin strategy */
         atomic_set(&rrpc->next_lun, -1);
@@ -1468,12 +1466,12 @@ static struct nvm_tgt_type tt_rrpc = {
 
 static int __init rrpc_module_init(void)
 {
-        return nvm_register_target(&tt_rrpc);
+        return nvm_register_tgt_type(&tt_rrpc);
 }
 
 static void rrpc_module_exit(void)
 {
-        nvm_unregister_target(&tt_rrpc);
+        nvm_unregister_tgt_type(&tt_rrpc);
 }
 
 module_init(rrpc_module_init);
Some files were not shown because too many files have changed in this diff.