Mirror of https://github.com/armbian/linux-rockchip.git (synced 2026-01-06 11:08:10 -08:00)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This series consists of the usual driver updates (ufs, pm80xx, lpfc,
  mpi3mr, mpt3sas, hisi_sas, libsas) and minor updates and bug fixes.
  The most impactful change is likely the switch from GFP_DMA to
  GFP_KERNEL in a bunch of drivers, but even that shouldn't affect too
  many people"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (121 commits)
  scsi: mpi3mr: Bump driver version to 8.0.0.61.0
  scsi: mpi3mr: Fixes around reply request queues
  scsi: mpi3mr: Enhanced Task Management Support Reply handling
  scsi: mpi3mr: Use TM response codes from MPI3 headers
  scsi: mpi3mr: Add io_uring interface support in I/O-polled mode
  scsi: mpi3mr: Print cable mngnt and temp threshold events
  scsi: mpi3mr: Support Prepare for Reset event
  scsi: mpi3mr: Add Event acknowledgment logic
  scsi: mpi3mr: Gracefully handle online FW update operation
  scsi: mpi3mr: Detect async reset that occurred in firmware
  scsi: mpi3mr: Add IOC reinit function
  scsi: mpi3mr: Handle offline FW activation in graceful manner
  scsi: mpi3mr: Code refactor of IOC init - part2
  scsi: mpi3mr: Code refactor of IOC init - part1
  scsi: mpi3mr: Fault IOC when internal command gets timeout
  scsi: mpi3mr: Display IOC firmware package version
  scsi: mpi3mr: Handle unaligned PLL in unmap cmnds
  scsi: mpi3mr: Increase internal cmnds timeout to 60s
  scsi: mpi3mr: Do access status validation before adding devices
  scsi: mpi3mr: Add support for PCIe Managed Switch SES device
  ...
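A note on the GFP_DMA to GFP_KERNEL switch called out above: these buffers are either obtained from dma_alloc_coherent(), where the device's DMA mask already decides which memory is usable, or are mapped by the SCSI midlayer, so confining them to the small legacy ZONE_DMA was unnecessary. The following is a minimal illustrative sketch of the two allocation styles touched in the hunks below; the function and sizes are hypothetical, not taken from any one driver.

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Illustrative only: shows both allocation patterns converted in this merge. */
static int example_alloc(struct device *dev)
{
	void *cdb_buf;
	void *coherent;
	dma_addr_t phys;

	/* Was: kmalloc(512, GFP_KERNEL | GFP_DMA); the zone restriction is dropped. */
	cdb_buf = kmalloc(512, GFP_KERNEL);
	if (!cdb_buf)
		return -ENOMEM;

	/* Was: dma_alloc_coherent(..., GFP_DMA); the dma_mask, not the GFP zone
	 * flag, determines where coherent memory may come from. */
	coherent = dma_alloc_coherent(dev, PAGE_SIZE, &phys, GFP_KERNEL);
	if (!coherent) {
		kfree(cdb_buf);
		return -ENOMEM;
	}

	dma_free_coherent(dev, PAGE_SIZE, coherent, phys);
	kfree(cdb_buf);
	return 0;
}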
@@ -7081,9 +7081,7 @@ S:	Maintained
 F:	drivers/mmc/host/cqhci*
 
 EMULEX 10Gbps iSCSI - OneConnect DRIVER
-M:	Subbu Seetharaman <subbu.seetharaman@broadcom.com>
 M:	Ketan Mukadam <ketan.mukadam@broadcom.com>
-M:	Jitendra Bhivare <jitendra.bhivare@broadcom.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 W:	http://www.broadcom.com
@@ -163,27 +163,19 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
 /**
  * blk_post_runtime_resume - Post runtime resume processing
  * @q: the queue of the device
- * @err: return value of the device's runtime_resume function
  *
  * Description:
- *    Update the queue's runtime status according to the return value of the
- *    device's runtime_resume function. If the resume was successful, call
- *    blk_set_runtime_active() to do the real work of restarting the queue.
+ *    For historical reasons, this routine merely calls blk_set_runtime_active()
+ *    to do the real work of restarting the queue. It does this regardless of
+ *    whether the device's runtime-resume succeeded; even if it failed the
+ *    driver or error handler will need to communicate with the device.
  *
  *    This function should be called near the end of the device's
  *    runtime_resume callback.
  */
-void blk_post_runtime_resume(struct request_queue *q, int err)
+void blk_post_runtime_resume(struct request_queue *q)
 {
-	if (!q->dev)
-		return;
-	if (!err) {
-		blk_set_runtime_active(q);
-	} else {
-		spin_lock_irq(&q->queue_lock);
-		q->rpm_status = RPM_SUSPENDED;
-		spin_unlock_irq(&q->queue_lock);
-	}
+	blk_set_runtime_active(q);
 }
 EXPORT_SYMBOL(blk_post_runtime_resume);
 
@@ -201,7 +193,7 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
  * runtime PM status and re-enable peeking requests from the queue. It
  * should be called before first request is added to the queue.
  *
- * This function is also called by blk_post_runtime_resume() for successful
+ * This function is also called by blk_post_runtime_resume() for
  * runtime resumes. It does everything necessary to restart the queue.
  */
 void blk_set_runtime_active(struct request_queue *q)
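The blk-pm hunks above drop the err argument: blk_post_runtime_resume() now always marks the queue runtime-active, because even after a failed resume the driver or error handler still needs the queue running to reach the device. A hedged sketch of a caller adapted to the new signature follows; the function name and body are illustrative (in the kernel the analogous caller lives in the SCSI midlayer's runtime-PM code), not a real driver.

#include <linux/blk-pm.h>
#include <scsi/scsi_device.h>

/* Illustrative runtime_resume callback using the new one-argument form. */
static int example_sdev_runtime_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	int err = 0;

	blk_pre_runtime_resume(sdev->request_queue);
	/* ... power the device back up; err holds the result ... */
	blk_post_runtime_resume(sdev->request_queue);	/* was: (..., err) */

	return err;
}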
@@ -1274,8 +1274,6 @@ mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
 static int
 mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
 {
-	int r = 0;
-
 	/* return if in use */
 	if (CHIPREG_READ32(&ioc->chip->Doorbell)
 	    & MPI_DOORBELL_ACTIVE)
@@ -1289,9 +1287,9 @@ mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int slee
 		(access_control_value<<12)));
 
 	/* Wait for IOC to clear Doorbell Status bit */
-	if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
+	if (WaitForDoorbellAck(ioc, 5, sleepFlag) < 0)
 		return -2;
-	}else
+	else
 		return 0;
 }
 
@@ -904,13 +904,11 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
 /**
  * inia100_queue_lck - queue command with host
  * @cmd: Command block
- * @done: Completion function
  *
  * Called by the mid layer to queue a command. Process the command
  * block, build the host specific scb structures and if there is room
  * queue the command down to the controller
  */
-
 static int inia100_queue_lck(struct scsi_cmnd *cmd)
 {
 	struct orc_scb *scb;
@@ -614,7 +614,6 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
 /**
  * atp870u_queuecommand_lck - Queue SCSI command
  * @req_p: request block
- * @done: completion function
  *
  * Queue a command to the ATP queue. Called with the host lock held.
  */
@@ -981,7 +981,7 @@ const struct attribute_group *bfad_im_host_groups[] = {
 	NULL
 };
 
-struct attribute *bfad_im_vport_attrs[] = {
+static struct attribute *bfad_im_vport_attrs[] = {
 	&dev_attr_serial_number.attr,
 	&dev_attr_model.attr,
 	&dev_attr_model_description.attr,
@@ -239,7 +239,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
 	u_char *buffer;
 	int result;
 
-	buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+	buffer = kmalloc(512, GFP_KERNEL);
 	if(!buffer)
 		return -ENOMEM;
 
@@ -297,7 +297,7 @@ ch_readconfig(scsi_changer *ch)
 	int result,id,lun,i;
 	u_int elem;
 
-	buffer = kzalloc(512, GFP_KERNEL | GFP_DMA);
+	buffer = kzalloc(512, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
 
@@ -783,7 +783,7 @@ static long ch_ioctl(struct file *file,
 			return -EINVAL;
 		elem = ch->firsts[cge.cge_type] + cge.cge_unit;
 
-		buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+		buffer = kmalloc(512, GFP_KERNEL);
 		if (!buffer)
 			return -ENOMEM;
 		mutex_lock(&ch->lock);
@@ -946,7 +946,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
  * layer, invoke 'done' on completion
  *
  * @cmd: pointer to scsi command object
- * @done: function pointer to be invoked on completion
  *
  * Returns 1 if the adapter (host) is busy, else returns 0. One
  * reason for an adapter to be busy is that the number
@@ -959,7 +958,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
  * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
  * and is expected to be held on return.
  *
- **/
+ */
 static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
 {
 	void (*done)(struct scsi_cmnd *) = scsi_done;
@@ -261,7 +261,7 @@ efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
 
 	dma.size = FW_WRITE_BUFSIZE;
 	dma.virt = dma_alloc_coherent(&efct->pci->dev,
-				      dma.size, &dma.phys, GFP_DMA);
+				      dma.size, &dma.phys, GFP_KERNEL);
 	if (!dma.virt)
 		return -ENOMEM;
 
@@ -516,7 +516,7 @@ efct_hw_setup_io(struct efct_hw *hw)
 		dma = &hw->xfer_rdy;
 		dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
 		dma->virt = dma_alloc_coherent(&efct->pci->dev,
-					       dma->size, &dma->phys, GFP_DMA);
+					       dma->size, &dma->phys, GFP_KERNEL);
 		if (!dma->virt)
 			return -ENOMEM;
 	}
@@ -562,7 +562,7 @@ efct_hw_setup_io(struct efct_hw *hw)
 			    sizeof(struct sli4_sge);
 		dma->virt = dma_alloc_coherent(&efct->pci->dev,
 					       dma->size, &dma->phys,
-					       GFP_DMA);
+					       GFP_KERNEL);
 		if (!dma->virt) {
 			efc_log_err(hw->os, "dma_alloc fail %d\n", i);
 			memset(&io->def_sgl, 0,
@@ -618,7 +618,7 @@ efct_hw_init_prereg_io(struct efct_hw *hw)
 	memset(&req, 0, sizeof(struct efc_dma));
 	req.size = 32 + sgls_per_request * 16;
 	req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
-				      GFP_DMA);
+				      GFP_KERNEL);
 	if (!req.virt) {
 		kfree(sgls);
 		return -ENOMEM;
@@ -1063,7 +1063,7 @@ efct_hw_init(struct efct_hw *hw)
 	dma = &hw->loop_map;
 	dma->size = SLI4_MIN_LOOP_MAP_BYTES;
 	dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
-				       GFP_DMA);
+				       GFP_KERNEL);
 	if (!dma->virt)
 		return -EIO;
 
@@ -1192,7 +1192,7 @@ efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
 		prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
 						   prq->dma.size,
 						   &prq->dma.phys,
-						   GFP_DMA);
+						   GFP_KERNEL);
 		if (!prq->dma.virt) {
 			efc_log_err(hw->os, "DMA allocation failed\n");
 			kfree(rq_buf);
@@ -48,7 +48,7 @@ efct_io_pool_create(struct efct *efct, u32 num_sgl)
 		io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
 		io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
 						     io->rspbuf.size,
-						     &io->rspbuf.phys, GFP_DMA);
+						     &io->rspbuf.phys, GFP_KERNEL);
 		if (!io->rspbuf.virt) {
 			efc_log_err(efct, "dma_alloc rspbuf failed\n");
 			efct_io_pool_free(io_pool);
@@ -179,7 +179,7 @@ efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
 	nport->dma.size = EFC_SPARAM_DMA_SZ;
 	nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
 					     nport->dma.size, &nport->dma.phys,
-					     GFP_DMA);
+					     GFP_KERNEL);
 	if (!nport->dma.virt) {
 		efc_log_err(efc, "Failed to allocate DMA memory\n");
 		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
@@ -466,7 +466,7 @@ efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
 	domain->dma.size = EFC_SPARAM_DMA_SZ;
 	domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
 					      domain->dma.size,
-					      &domain->dma.phys, GFP_DMA);
+					      &domain->dma.phys, GFP_KERNEL);
 	if (!domain->dma.virt) {
 		efc_log_err(efc, "Failed to allocate DMA memory\n");
 		return -EIO;
@@ -71,7 +71,7 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
 	/* now allocate DMA for request and response */
 	els->io.req.size = reqlen;
 	els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size,
-					      &els->io.req.phys, GFP_DMA);
+					      &els->io.req.phys, GFP_KERNEL);
 	if (!els->io.req.virt) {
 		mempool_free(els, efc->els_io_pool);
 		spin_unlock_irqrestore(&node->els_ios_lock, flags);
@@ -80,7 +80,7 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
 
 	els->io.rsp.size = rsplen;
 	els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size,
-					      &els->io.rsp.phys, GFP_DMA);
+					      &els->io.rsp.phys, GFP_KERNEL);
 	if (!els->io.rsp.virt) {
 		dma_free_coherent(&efc->pci->dev, els->io.req.size,
 				  els->io.req.virt, els->io.req.phys);
@@ -445,7 +445,7 @@ sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs,
 
 	dma->size = payload_size;
 	dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
-				       &dma->phys, GFP_DMA);
+				       &dma->phys, GFP_KERNEL);
 	if (!dma->virt)
 		return -EIO;
 
@@ -508,7 +508,7 @@ __sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
 
 	q->dma.size = size * n_entries;
 	q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
-					 &q->dma.phys, GFP_DMA);
+					 &q->dma.phys, GFP_KERNEL);
 	if (!q->dma.virt) {
 		memset(&q->dma, 0, sizeof(struct efc_dma));
 		efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]);
@@ -849,7 +849,7 @@ static int sli_cmd_cq_set_create(struct sli4 *sli4,
 
 	dma->size = payload_size;
 	dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
-				       &dma->phys, GFP_DMA);
+				       &dma->phys, GFP_KERNEL);
 	if (!dma->virt)
 		return -EIO;
 
@@ -4413,7 +4413,7 @@ sli_get_ctrl_attributes(struct sli4 *sli4)
 	psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes);
 	data.size = psize;
 	data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size,
-				       &data.phys, GFP_DMA);
+				       &data.phys, GFP_KERNEL);
 	if (!data.virt) {
 		memset(&data, 0, sizeof(struct efc_dma));
 		efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n");
@@ -4653,7 +4653,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
 	 */
 	sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe);
 	sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size,
-					     &sli4->bmbx.phys, GFP_DMA);
+					     &sli4->bmbx.phys, GFP_KERNEL);
 	if (!sli4->bmbx.virt) {
 		memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
 		efc_log_err(sli4, "bootstrap mailbox allocation failed\n");
@@ -4674,7 +4674,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
 	sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev,
 						 sli4->vpd_data.size,
 						 &sli4->vpd_data.phys,
-						 GFP_DMA);
+						 GFP_KERNEL);
 	if (!sli4->vpd_data.virt) {
 		memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
 		/* Note that failure isn't fatal in this specific case */
@@ -5070,7 +5070,7 @@ sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma,
 		payload_dma->size = payload_size;
 		payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev,
 						       payload_dma->size,
-						       &payload_dma->phys, GFP_DMA);
+						       &payload_dma->phys, GFP_KERNEL);
 		if (!payload_dma->virt) {
 			memset(payload_dma, 0, sizeof(struct efc_dma));
 			efc_log_err(sli4, "mbox payload memory allocation fail\n");
@@ -8,7 +8,6 @@
 #define _HISI_SAS_H_
 
 #include <linux/acpi.h>
-#include <linux/async.h>
 #include <linux/blk-mq.h>
 #include <linux/blk-mq-pci.h>
 #include <linux/clk.h>
@@ -134,6 +133,11 @@ struct hisi_sas_rst {
 	bool done;
 };
 
+struct hisi_sas_internal_abort {
+	unsigned int flag;
+	unsigned int tag;
+};
+
 #define HISI_SAS_RST_WORK_INIT(r, c) \
 	{ .hisi_hba = hisi_hba, \
 	  .completion = &c, \
@@ -154,6 +158,7 @@ enum hisi_sas_bit_err_type {
 enum hisi_sas_phy_event {
 	HISI_PHYE_PHY_UP = 0U,
 	HISI_PHYE_LINK_RESET,
+	HISI_PHYE_PHY_UP_PM,
 	HISI_PHYES_NUM,
 };
 
File diff suppressed because it is too large
@@ -1484,7 +1484,6 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
 	struct device *dev = hisi_hba->dev;
 
-	del_timer(&phy->timer);
 	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
 
 	port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
@@ -1561,9 +1560,18 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
 	}
 
 	phy->port_id = port_id;
-	phy->phy_attached = 1;
-	hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
+
+	/* Call pm_runtime_put_sync() with pairs in hisi_sas_phyup_pm_work() */
+	pm_runtime_get_noresume(dev);
+	hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM);
+
 	res = IRQ_HANDLED;
+
+	spin_lock(&phy->lock);
+	/* Delete timer and set phy_attached atomically */
+	del_timer(&phy->timer);
+	phy->phy_attached = 1;
+	spin_unlock(&phy->lock);
 end:
 	if (phy->reset_completion)
 		complete(phy->reset_completion);
@@ -4775,6 +4783,8 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	scsi_scan_host(shost);
 
+	pm_runtime_set_autosuspend_delay(dev, 5000);
+	pm_runtime_use_autosuspend(dev);
 	/*
 	 * For the situation that there are ATA disks connected with SAS
 	 * controller, it additionally creates ata_port which will affect the
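The two lines added to hisi_sas_v3_probe() opt the controller into runtime-PM autosuspend with a 5000 ms idle delay, and the phy-up path above takes a pm_runtime_get_noresume() reference that is dropped later from the phy-up work item. The snippet below is a generic illustrative sketch of the autosuspend idiom, not the full hisi_sas probe path; the function name is hypothetical.

#include <linux/pm_runtime.h>

/* Illustrative probe-side runtime PM setup; 5000 ms matches the hunk above. */
static void example_enable_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 5000);
	pm_runtime_use_autosuspend(dev);

	/* Generic idiom: refresh the idle timestamp and drop a usage count so
	 * the device may suspend once the autosuspend delay expires. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}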
@@ -4848,6 +4858,7 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
 	int rc;
 
 	dev_info(dev, "FLR prepare\n");
+	down(&hisi_hba->sem);
 	set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
 	hisi_sas_controller_reset_prepare(hisi_hba);
 
@@ -4897,6 +4908,8 @@ static int _suspend_v3_hw(struct device *device)
 	if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
 		return -1;
 
+	dev_warn(dev, "entering suspend state\n");
+
 	scsi_block_requests(shost);
 	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
 	flush_workqueue(hisi_hba->wq);
@@ -4912,11 +4925,11 @@ static int _suspend_v3_hw(struct device *device)
 
 	hisi_sas_init_mem(hisi_hba);
 
-	dev_warn(dev, "entering suspend state\n");
-
 	hisi_sas_release_tasks(hisi_hba);
 
 	sas_suspend_ha(sha);
 
+	dev_warn(dev, "end of suspending controller\n");
+
 	return 0;
 }
@@ -4943,9 +4956,19 @@ static int _resume_v3_hw(struct device *device)
 		return rc;
 	}
 	phys_init_v3_hw(hisi_hba);
-	sas_resume_ha(sha);
+
+	/*
+	 * If a directly-attached disk is removed during suspend, a deadlock
+	 * may occur, as the PHYE_RESUME_TIMEOUT processing will require the
+	 * hisi_hba->device to be active, which can only happen when resume
+	 * completes. So don't wait for the HA event workqueue to drain upon
+	 * resume.
+	 */
+	sas_resume_ha_no_sync(sha);
 	clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
 
+	dev_warn(dev, "end of resuming controller\n");
+
 	return 0;
 }
 
@@ -61,6 +61,7 @@ static void scsi_host_cls_release(struct device *dev)
 static struct class shost_class = {
 	.name		= "scsi_host",
 	.dev_release	= scsi_host_cls_release,
+	.dev_groups	= scsi_shost_groups,
 };
 
 /**
@@ -377,7 +378,7 @@ static struct device_type scsi_host_type = {
 struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 {
 	struct Scsi_Host *shost;
-	int index, i, j = 0;
+	int index;
 
 	shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
 	if (!shost)
@@ -483,17 +484,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	shost->shost_dev.parent = &shost->shost_gendev;
 	shost->shost_dev.class = &shost_class;
 	dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
-	shost->shost_dev.groups = shost->shost_dev_attr_groups;
-	shost->shost_dev_attr_groups[j++] = &scsi_shost_attr_group;
-	if (sht->shost_groups) {
-		for (i = 0; sht->shost_groups[i] &&
-			     j < ARRAY_SIZE(shost->shost_dev_attr_groups);
-		     i++, j++) {
-			shost->shost_dev_attr_groups[j] =
-				sht->shost_groups[i];
-		}
-	}
-	WARN_ON_ONCE(j >= ARRAY_SIZE(shost->shost_dev_attr_groups));
+	shost->shost_dev.groups = sht->shost_groups;
 
 	shost->ehandler = kthread_run(scsi_error_handler, shost,
 				      "scsi_eh_%d", shost->host_no);
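The scsi_host_alloc() hunk above removes the hand-rolled copy loop: the template's shost_groups array is now assigned to shost_dev.groups directly, while the standard scsi_shost_groups are supplied once through the class's dev_groups (see the shost_class hunk). From a driver's point of view, exposing extra host sysfs attributes therefore means filling a NULL-terminated attribute_group array in its scsi_host_template. A minimal hedged sketch follows; all names here are illustrative, not from a real driver.

#include <scsi/scsi_host.h>

static ssize_t fw_rev_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sysfs_emit(buf, "example\n");	/* placeholder value */
}
static DEVICE_ATTR_RO(fw_rev);

static struct attribute *example_host_attrs[] = {
	&dev_attr_fw_rev.attr,
	NULL
};
ATTRIBUTE_GROUPS(example_host);		/* defines example_host_groups[] */

static struct scsi_host_template example_sht = {
	.name		= "example",
	.shost_groups	= example_host_groups,	/* picked up by scsi_host_alloc() */
};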
@@ -4354,7 +4354,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
 	int i, ndevs_to_allocate;
 	int raid_ctlr_position;
 	bool physical_device;
-	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
 
 	currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
 	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
@@ -4368,7 +4367,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
 		dev_err(&h->pdev->dev, "out of memory\n");
 		goto out;
 	}
-	memset(lunzerobits, 0, sizeof(lunzerobits));
 
 	h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
 
@@ -2602,13 +2602,11 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
 /**
  * i91u_queuecommand_lck - Queue a new command if possible
  * @cmd: SCSI command block from the mid layer
- * @done: Completion handler
  *
  * Attempts to queue a new command with the host adapter. Will return
  * zero if successful or indicate a host busy condition if not (which
  * will cause the mid layer to call us again later with the command)
  */
-
 static int i91u_queuecommand_lck(struct scsi_cmnd *cmd)
 {
 	struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
@@ -2849,7 +2847,8 @@ static int initio_probe_one(struct pci_dev *pdev,
 
 	for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
 		i = num_scb * sizeof(struct scsi_ctrl_blk);
-		if ((scb = kzalloc(i, GFP_DMA)) != NULL)
+		scb = kzalloc(i, GFP_KERNEL);
+		if (scb)
 			break;
 	}
 
Some files were not shown because too many files have changed in this diff.