You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
block: Rename blk_queue_max_sectors to blk_queue_max_hw_sectors
The block layer calling convention is blk_queue_<limit name>. blk_queue_max_sectors predates this practice, leading to some confusion. Rename the function to appropriately reflect that its intended use is to set max_hw_sectors. Also introduce a temporary wrapper for backwards compatibility. This can be removed after the merge window is closed. Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
This commit is contained in:
committed by
Jens Axboe
parent
eb28d31bc9
commit
086fa5ff08
@@ -747,7 +747,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
|
||||
ubd_dev->fd = fd;
|
||||
|
||||
if(ubd_dev->cow.file != NULL){
|
||||
blk_queue_max_sectors(ubd_dev->queue, 8 * sizeof(long));
|
||||
blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long));
|
||||
|
||||
err = -ENOMEM;
|
||||
ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len);
|
||||
|
||||
@@ -154,7 +154,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
|
||||
q->unplug_timer.data = (unsigned long)q;
|
||||
|
||||
blk_set_default_limits(&q->limits);
|
||||
blk_queue_max_sectors(q, BLK_SAFE_MAX_SECTORS);
|
||||
blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
|
||||
|
||||
/*
|
||||
* If the caller didn't supply a lock, fall back to our embedded
|
||||
@@ -210,7 +210,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
|
||||
EXPORT_SYMBOL(blk_queue_bounce_limit);
|
||||
|
||||
/**
|
||||
* blk_queue_max_sectors - set max sectors for a request for this queue
|
||||
* blk_queue_max_hw_sectors - set max sectors for a request for this queue
|
||||
* @q: the request queue for the device
|
||||
* @max_hw_sectors: max hardware sectors in the usual 512b unit
|
||||
*
|
||||
@@ -225,7 +225,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
|
||||
* per-device basis in /sys/block/<device>/queue/max_sectors_kb.
|
||||
* The soft limit can not exceed max_hw_sectors.
|
||||
**/
|
||||
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_hw_sectors)
|
||||
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
|
||||
{
|
||||
if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
|
||||
max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
|
||||
@@ -237,7 +237,7 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_hw_sectors)
|
||||
q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
|
||||
BLK_DEF_MAX_SECTORS);
|
||||
}
|
||||
EXPORT_SYMBOL(blk_queue_max_sectors);
|
||||
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
|
||||
|
||||
/**
|
||||
* blk_queue_max_discard_sectors - set max sectors for a single discard
|
||||
|
||||
@@ -1097,7 +1097,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
|
||||
dev->flags |= ATA_DFLAG_NO_UNLOAD;
|
||||
|
||||
/* configure max sectors */
|
||||
blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
|
||||
blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors);
|
||||
|
||||
if (dev->class == ATA_DEV_ATAPI) {
|
||||
struct request_queue *q = sdev->request_queue;
|
||||
|
||||
@@ -2535,7 +2535,7 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
|
||||
RequestQueue->queuedata = Controller;
|
||||
blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
|
||||
blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
|
||||
blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
|
||||
blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
|
||||
disk->queue = RequestQueue;
|
||||
sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
|
||||
disk->major = MajorNumber;
|
||||
|
||||
+1
-1
@@ -434,7 +434,7 @@ static struct brd_device *brd_alloc(int i)
|
||||
goto out_free_dev;
|
||||
blk_queue_make_request(brd->brd_queue, brd_make_request);
|
||||
blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
|
||||
blk_queue_max_sectors(brd->brd_queue, 1024);
|
||||
blk_queue_max_hw_sectors(brd->brd_queue, 1024);
|
||||
blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
|
||||
|
||||
disk = brd->brd_disk = alloc_disk(1 << part_shift);
|
||||
|
||||
@@ -1802,7 +1802,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
|
||||
/* This is a limit in the driver and could be eliminated. */
|
||||
blk_queue_max_phys_segments(disk->queue, h->maxsgentries);
|
||||
|
||||
blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
|
||||
blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
|
||||
|
||||
blk_queue_softirq_done(disk->queue, cciss_softirq_done);
|
||||
|
||||
|
||||
@@ -709,7 +709,7 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
|
||||
|
||||
max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
|
||||
|
||||
blk_queue_max_sectors(q, max_seg_s >> 9);
|
||||
blk_queue_max_hw_sectors(q, max_seg_s >> 9);
|
||||
blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS);
|
||||
blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS);
|
||||
blk_queue_max_segment_size(q, max_seg_s);
|
||||
|
||||
@@ -4234,7 +4234,7 @@ static int __init floppy_init(void)
|
||||
err = -ENOMEM;
|
||||
goto out_unreg_driver;
|
||||
}
|
||||
blk_queue_max_sectors(floppy_queue, 64);
|
||||
blk_queue_max_hw_sectors(floppy_queue, 64);
|
||||
|
||||
blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
|
||||
floppy_find, NULL, NULL);
|
||||
|
||||
+1
-1
@@ -719,7 +719,7 @@ static int __init hd_init(void)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
blk_queue_max_sectors(hd_queue, 255);
|
||||
blk_queue_max_hw_sectors(hd_queue, 255);
|
||||
init_timer(&device_timer);
|
||||
device_timer.function = hd_times_out;
|
||||
blk_queue_logical_block_size(hd_queue, 512);
|
||||
|
||||
@@ -980,7 +980,7 @@ static int mg_probe(struct platform_device *plat_dev)
|
||||
__func__, __LINE__);
|
||||
goto probe_err_6;
|
||||
}
|
||||
blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
|
||||
blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS);
|
||||
blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
|
||||
|
||||
init_timer(&host->timer);
|
||||
|
||||
@@ -906,7 +906,7 @@ static int __init pd_init(void)
|
||||
if (!pd_queue)
|
||||
goto out1;
|
||||
|
||||
blk_queue_max_sectors(pd_queue, cluster);
|
||||
blk_queue_max_hw_sectors(pd_queue, cluster);
|
||||
|
||||
if (register_blkdev(major, name))
|
||||
goto out2;
|
||||
|
||||
@@ -2312,7 +2312,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
|
||||
* even if the size is a multiple of the packet size.
|
||||
*/
|
||||
spin_lock_irq(q->queue_lock);
|
||||
blk_queue_max_sectors(q, pd->settings.size);
|
||||
blk_queue_max_hw_sectors(q, pd->settings.size);
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
set_bit(PACKET_WRITABLE, &pd->flags);
|
||||
} else {
|
||||
@@ -2613,7 +2613,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
|
||||
|
||||
blk_queue_make_request(q, pkt_make_request);
|
||||
blk_queue_logical_block_size(q, CD_FRAMESIZE);
|
||||
blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
|
||||
blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
|
||||
blk_queue_merge_bvec(q, pkt_merge_bvec);
|
||||
q->queuedata = pd;
|
||||
}
|
||||
|
||||
@@ -474,7 +474,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
|
||||
|
||||
blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH);
|
||||
|
||||
blk_queue_max_sectors(queue, dev->bounce_size >> 9);
|
||||
blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
|
||||
blk_queue_segment_boundary(queue, -1UL);
|
||||
blk_queue_dma_alignment(queue, dev->blk_size-1);
|
||||
blk_queue_logical_block_size(queue, dev->blk_size);
|
||||
|
||||
@@ -754,7 +754,7 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
|
||||
blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS);
|
||||
blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS);
|
||||
blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
|
||||
blk_queue_max_sectors(queue, BLK_SAFE_MAX_SECTORS);
|
||||
blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
|
||||
|
||||
gendisk = alloc_disk(1);
|
||||
if (!gendisk) {
|
||||
|
||||
@@ -693,7 +693,7 @@ static int probe_disk(struct vdc_port *port)
|
||||
|
||||
blk_queue_max_hw_segments(q, port->ring_cookies);
|
||||
blk_queue_max_phys_segments(q, port->ring_cookies);
|
||||
blk_queue_max_sectors(q, port->max_xfer_size);
|
||||
blk_queue_max_hw_sectors(q, port->max_xfer_size);
|
||||
g->major = vdc_major;
|
||||
g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
|
||||
strcpy(g->disk_name, port->disk_name);
|
||||
|
||||
+1
-1
@@ -2323,7 +2323,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
|
||||
blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
|
||||
blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
|
||||
blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
|
||||
blk_queue_max_sectors(q, UB_MAX_SECTORS);
|
||||
blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
|
||||
blk_queue_logical_block_size(q, lun->capacity.bsize);
|
||||
|
||||
lun->disk = disk;
|
||||
|
||||
@@ -473,7 +473,7 @@ retry:
|
||||
d->disk = g;
|
||||
blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
|
||||
blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
|
||||
blk_queue_max_sectors(q, VIODASD_MAXSECTORS);
|
||||
blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
|
||||
g->major = VIODASD_MAJOR;
|
||||
g->first_minor = dev_no << PARTITION_SHIFT;
|
||||
if (dev_no >= 26)
|
||||
|
||||
+1
-1
@@ -242,7 +242,7 @@ static int __init xd_init(void)
|
||||
}
|
||||
|
||||
/* xd_maxsectors depends on controller - so set after detection */
|
||||
blk_queue_max_sectors(xd_queue, xd_maxsectors);
|
||||
blk_queue_max_hw_sectors(xd_queue, xd_maxsectors);
|
||||
|
||||
for (i = 0; i < xd_drives; i++)
|
||||
add_disk(xd_gendisk[i]);
|
||||
|
||||
@@ -346,7 +346,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
|
||||
|
||||
/* Hard sector size and max sectors impersonate the equiv. hardware. */
|
||||
blk_queue_logical_block_size(rq, sector_size);
|
||||
blk_queue_max_sectors(rq, 512);
|
||||
blk_queue_max_hw_sectors(rq, 512);
|
||||
|
||||
/* Each segment in a request is up to an aligned page in size. */
|
||||
blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
|
||||
|
||||
@@ -618,7 +618,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
|
||||
sizeof(gendisk->disk_name));
|
||||
blk_queue_max_hw_segments(q, 1);
|
||||
blk_queue_max_phys_segments(q, 1);
|
||||
blk_queue_max_sectors(q, 4096 / 512);
|
||||
blk_queue_max_hw_sectors(q, 4096 / 512);
|
||||
gendisk->queue = q;
|
||||
gendisk->fops = &viocd_fops;
|
||||
gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user