target: kill struct se_subsystem_dev
Simplify the code a lot by killing the superfluous struct se_subsystem_dev. Instead, se_device is allocated early on by the backend driver, which allocates it as part of its own per-device structure, borrowing the scheme that is for example used for inode allocation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
commit 0fd97ccf45 (parent 3d70f8c617), committed by Nicholas Bellinger
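The heart of the change is the allocation scheme the commit message names: each backend now embeds struct se_device inside its own per-device structure and recovers its private data with container_of(), instead of going through a separate se_subsystem_dev. Below is a minimal, self-contained sketch of that pattern using the FILEIO names that appear in the diff (FD_DEV(), fd_alloc_device()); the stand-in struct definitions and the use of calloc() are simplifications for illustration, not the real kernel types or allocator.

#include <stddef.h>
#include <stdlib.h>

/* Simplified stand-ins; the real definitions live in the target core
 * headers and target_core_file.h. */
struct se_device {
	unsigned int dev_flags;
};

struct fd_dev {
	struct se_device dev;	/* embedded, not pointed to */
	unsigned int fbd_flags;
};

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#endif

/* Recover the backend's private structure from the generic se_device,
 * the same shape as the FD_DEV()/IBLOCK_DEV() helpers added below. */
static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
	return container_of(dev, struct fd_dev, dev);
}

/* alloc_device step: the backend allocates its whole per-device
 * structure up front and hands the core a pointer to the embedded
 * se_device, mirroring fd_alloc_device() in the diff. */
static struct se_device *fd_alloc_device_sketch(void)
{
	struct fd_dev *fd_dev = calloc(1, sizeof(*fd_dev));

	return fd_dev ? &fd_dev->dev : NULL;
}

Configuration then happens in a second step once the user has set parameters, which is why the old allocate_virtdevice/create_virtdevice callbacks turn into alloc_device/configure_device pairs in the hunks that follow.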
drivers/target/loopback/tcm_loop.h

@@ -53,7 +53,6 @@ struct tcm_loop_hba {
 	struct se_hba_s *se_hba;
 	struct se_lun *tl_hba_lun;
 	struct se_port *tl_hba_lun_sep;
-	struct se_device_s *se_dev_hba_ptr;
 	struct tcm_loop_nexus *tl_nexus;
 	struct device dev;
 	struct Scsi_Host *sh;

drivers/target/target_core_alua.c

@@ -61,7 +61,7 @@ struct t10_alua_lu_gp *default_lu_gp;
  */
 int target_emulate_report_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
@@ -86,8 +86,8 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
 	}
 	buf = transport_kmap_data_sg(cmd);

-	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
-	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		/*
 		 * Check if the Target port group and Target port descriptor list
@@ -160,7 +160,7 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
 		}
 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 	}
-	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 	/*
 	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
 	 */
@@ -203,7 +203,6 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
 int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
 	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
 	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
@@ -303,9 +302,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 		 * Locate the matching target port group ID from
 		 * the global tg_pt_gp list
 		 */
-		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 		list_for_each_entry(tg_pt_gp,
-				&su_dev->t10_alua.tg_pt_gps_list,
+				&dev->t10_alua.tg_pt_gps_list,
 				tg_pt_gp_list) {
 			if (!tg_pt_gp->tg_pt_gp_valid_id)
 				continue;
@@ -315,18 +314,18 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)

 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_inc();
-			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

 			rc = core_alua_do_port_transition(tg_pt_gp,
 					dev, l_port, nacl,
 					alua_access_state, 1);

-			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_dec();
 			break;
 		}
-		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 		/*
 		 * If not matching target port group ID can be located
 		 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
@@ -758,8 +757,7 @@ static int core_alua_update_tpg_primary_metadata(
 	int primary_state,
 	unsigned char *md_buf)
 {
-	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
-	struct t10_wwn *wwn = &su_dev->t10_wwn;
+	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
 	char path[ALUA_METADATA_PATH_LEN];
 	int len;

@@ -899,7 +897,6 @@ int core_alua_do_port_transition(
 {
 	struct se_device *dev;
 	struct se_port *port;
-	struct se_subsystem_dev *su_dev;
 	struct se_node_acl *nacl;
 	struct t10_alua_lu_gp *lu_gp;
 	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
@@ -949,14 +946,13 @@ int core_alua_do_port_transition(
 			lu_gp_mem_list) {

 		dev = lu_gp_mem->lu_gp_mem_dev;
-		su_dev = dev->se_sub_dev;
 		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
 		smp_mb__after_atomic_inc();
 		spin_unlock(&lu_gp->lu_gp_lock);

-		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 		list_for_each_entry(tg_pt_gp,
-				&su_dev->t10_alua.tg_pt_gps_list,
+				&dev->t10_alua.tg_pt_gps_list,
 				tg_pt_gp_list) {

 			if (!tg_pt_gp->tg_pt_gp_valid_id)
@@ -981,7 +977,7 @@ int core_alua_do_port_transition(
 			}
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_inc();
-			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 			/*
 			 * core_alua_do_transition_tg_pt() will always return
 			 * success.
@@ -989,11 +985,11 @@ int core_alua_do_port_transition(
 			core_alua_do_transition_tg_pt(tg_pt_gp, port,
 					nacl, md_buf, new_state, explict);

-			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_dec();
 		}
-		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

 		spin_lock(&lu_gp->lu_gp_lock);
 		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
@@ -1268,8 +1264,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)

 void core_alua_free_lu_gp_mem(struct se_device *dev)
 {
-	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
-	struct t10_alua *alua = &su_dev->t10_alua;
+	struct t10_alua *alua = &dev->t10_alua;
 	struct t10_alua_lu_gp *lu_gp;
 	struct t10_alua_lu_gp_member *lu_gp_mem;

@@ -1358,10 +1353,8 @@ void __core_alua_drop_lu_gp_mem(
 	spin_unlock(&lu_gp->lu_gp_lock);
 }

-struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
-	struct se_subsystem_dev *su_dev,
-	const char *name,
-	int def_group)
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
+		const char *name, int def_group)
 {
 	struct t10_alua_tg_pt_gp *tg_pt_gp;

@@ -1375,7 +1368,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
 	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-	tg_pt_gp->tg_pt_gp_su_dev = su_dev;
+	tg_pt_gp->tg_pt_gp_dev = dev;
 	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
 		ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
@@ -1392,14 +1385,14 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
 	tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;

 	if (def_group) {
-		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 		tg_pt_gp->tg_pt_gp_id =
-				su_dev->t10_alua.alua_tg_pt_gps_counter++;
+				dev->t10_alua.alua_tg_pt_gps_counter++;
 		tg_pt_gp->tg_pt_gp_valid_id = 1;
-		su_dev->t10_alua.alua_tg_pt_gps_count++;
+		dev->t10_alua.alua_tg_pt_gps_count++;
 		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
-			      &su_dev->t10_alua.tg_pt_gps_list);
-		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			      &dev->t10_alua.tg_pt_gps_list);
+		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 	}

 	return tg_pt_gp;
@@ -1409,9 +1402,10 @@ int core_alua_set_tg_pt_gp_id(
 	struct t10_alua_tg_pt_gp *tg_pt_gp,
 	u16 tg_pt_gp_id)
 {
-	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
 	u16 tg_pt_gp_id_tmp;
+
 	/*
 	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
 	 */
@@ -1421,19 +1415,19 @@ int core_alua_set_tg_pt_gp_id(
 		return -EINVAL;
 	}

-	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
-	if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
 		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
 			" 0x0000ffff reached\n");
-		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
 		return -ENOSPC;
 	}
 again:
 	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
-			su_dev->t10_alua.alua_tg_pt_gps_counter++;
+			dev->t10_alua.alua_tg_pt_gps_counter++;

-	list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
+	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
 			if (!tg_pt_gp_id)
@@ -1441,7 +1435,7 @@ again:

 			pr_err("ALUA Target Port Group ID: %hu already"
 				" exists, ignoring request\n", tg_pt_gp_id);
-			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 			return -EINVAL;
 		}
 	}
@@ -1449,9 +1443,9 @@ again:
 	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
 	tg_pt_gp->tg_pt_gp_valid_id = 1;
 	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
-			&su_dev->t10_alua.tg_pt_gps_list);
-	su_dev->t10_alua.alua_tg_pt_gps_count++;
-	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			&dev->t10_alua.tg_pt_gps_list);
+	dev->t10_alua.alua_tg_pt_gps_count++;
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

 	return 0;
 }
@@ -1480,8 +1474,9 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 void core_alua_free_tg_pt_gp(
 	struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
-	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+
 	/*
 	 * Once we have reached this point, config_item_put() has already
 	 * been called from target_core_alua_drop_tg_pt_gp().
@@ -1490,10 +1485,11 @@ void core_alua_free_tg_pt_gp(
 	 * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS
 	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
 	 */
-	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 	list_del(&tg_pt_gp->tg_pt_gp_list);
-	su_dev->t10_alua.alua_tg_pt_gps_counter--;
-	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+	dev->t10_alua.alua_tg_pt_gps_counter--;
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
 	/*
 	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
 	 * core_alua_get_tg_pt_gp_by_name() in
@@ -1502,6 +1498,7 @@ void core_alua_free_tg_pt_gp(
 	 */
 	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
 		cpu_relax();
+
 	/*
 	 * Release reference to struct t10_alua_tg_pt_gp from all associated
 	 * struct se_port.
@@ -1525,9 +1522,9 @@ void core_alua_free_tg_pt_gp(
 		 * default_tg_pt_gp.
 		 */
 		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-		if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
+		if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
 			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-					su_dev->t10_alua.default_tg_pt_gp);
+					dev->t10_alua.default_tg_pt_gp);
 		} else
 			tg_pt_gp_mem->tg_pt_gp = NULL;
 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1541,8 +1538,7 @@ void core_alua_free_tg_pt_gp(

 void core_alua_free_tg_pt_gp_mem(struct se_port *port)
 {
-	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
-	struct t10_alua *alua = &su_dev->t10_alua;
+	struct t10_alua *alua = &port->sep_lun->lun_se_dev->t10_alua;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

@@ -1574,25 +1570,24 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
 }

 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
-	struct se_subsystem_dev *su_dev,
-	const char *name)
+		struct se_device *dev, const char *name)
 {
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct config_item *ci;

-	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
-	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		if (!tg_pt_gp->tg_pt_gp_valid_id)
 			continue;
 		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
 		if (!strcmp(config_item_name(ci), name)) {
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 			return tg_pt_gp;
 		}
 	}
-	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

 	return NULL;
 }
@@ -1600,11 +1595,11 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
 static void core_alua_put_tg_pt_gp_from_name(
 	struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
-	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;

-	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 }

 /*
@@ -1640,9 +1635,8 @@ static void __core_alua_drop_tg_pt_gp_mem(

 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
 {
-	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
 	struct config_item *tg_pt_ci;
-	struct t10_alua *alua = &su_dev->t10_alua;
+	struct t10_alua *alua = &port->sep_lun->lun_se_dev->t10_alua;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	ssize_t len = 0;
@@ -1683,7 +1677,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 {
 	struct se_portal_group *tpg;
 	struct se_lun *lun;
-	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct se_device *dev = port->sep_lun->lun_se_dev;
 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	unsigned char buf[TG_PT_GROUP_NAME_BUF];
@@ -1692,7 +1686,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	tpg = port->sep_tpg;
 	lun = port->sep_lun;

-	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
+	if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
 		pr_warn("SPC3_ALUA_EMULATED not enabled for"
 			" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
 			tpg->se_tpg_tfo->tpg_get_tag(tpg),
@@ -1716,7 +1710,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	 * struct t10_alua_tg_pt_gp.  This reference is released with
 	 * core_alua_put_tg_pt_gp_from_name() below.
 	 */
-	tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
+	tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
 					strstrip(buf));
 	if (!tg_pt_gp_new)
 		return -ENODEV;
@@ -1750,7 +1744,7 @@ ssize_t core_alua_store_tg_pt_gp_info(

 			__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
 			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-					su_dev->t10_alua.default_tg_pt_gp);
+					dev->t10_alua.default_tg_pt_gp);
 			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

 			return count;
@@ -2054,32 +2048,29 @@ ssize_t core_alua_store_secondary_write_metadata(
 	return count;
 }

-int core_setup_alua(struct se_device *dev, int force_pt)
+int core_setup_alua(struct se_device *dev)
 {
-	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
-	struct t10_alua *alua = &su_dev->t10_alua;
+	struct t10_alua *alua = &dev->t10_alua;
 	struct t10_alua_lu_gp_member *lu_gp_mem;

 	/*
 	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
 	 * of the Underlying SCSI hardware.  In Linux/SCSI terms, this can
 	 * cause a problem because libata and some SATA RAID HBAs appear
 	 * under Linux/SCSI, but emulate SCSI logic themselves.
 	 */
-	if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
-	    !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
-		alua->alua_type = SPC_ALUA_PASSTHROUGH;
-		alua->alua_state_check = &core_alua_state_check_nop;
+	if ((dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) ||
+	    (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV &&
+	     !dev->dev_attrib.emulate_alua)) {
 		pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
 			" emulation\n", dev->transport->name);
-		return 0;
-	}
-	/*
-	 * If SPC-3 or above is reported by real or emulated struct se_device,
-	 * use emulated ALUA.
-	 */
-	if (dev->transport->get_device_rev(dev) >= SCSI_3) {
+
+		alua->alua_type = SPC_ALUA_PASSTHROUGH;
+		alua->alua_state_check = &core_alua_state_check_nop;
+	} else if (dev->transport->get_device_rev(dev) >= SCSI_3) {
 		pr_debug("%s: Enabling ALUA Emulation for SPC-3"
 			" device\n", dev->transport->name);
-
 		/*
 		 * Associate this struct se_device with the default ALUA
 		 * LUN Group.
@@ -2099,10 +2090,11 @@ int core_setup_alua(struct se_device *dev, int force_pt)
 			" core/alua/lu_gps/default_lu_gp\n",
 			dev->transport->name);
 	} else {
-		alua->alua_type = SPC2_ALUA_DISABLED;
-		alua->alua_state_check = &core_alua_state_check_nop;
 		pr_debug("%s: Disabling ALUA Emulation for SPC-2"
 			" device\n", dev->transport->name);
+
+		alua->alua_type = SPC2_ALUA_DISABLED;
+		alua->alua_state_check = &core_alua_state_check_nop;
 	}

 	return 0;

drivers/target/target_core_alua.h

@@ -91,7 +91,7 @@ extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
 		struct t10_alua_lu_gp *);
 extern void core_alua_drop_lu_gp_dev(struct se_device *);
 extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
-			struct se_subsystem_dev *, const char *, int);
+			struct se_device *, const char *, int);
 extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
 extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 		struct se_port *);
@@ -131,6 +131,6 @@ extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
 		char *);
 extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
 		const char *, size_t);
-extern int core_setup_alua(struct se_device *, int);
+extern int core_setup_alua(struct se_device *);

 #endif /* TARGET_CORE_ALUA_H */
File diff suppressed because it is too large
File diff suppressed because it is too large
drivers/target/target_core_fabric_configfs.c

@@ -734,14 +734,12 @@ static int target_fabric_port_link(
 	struct config_item *se_dev_ci)
 {
 	struct config_item *tpg_ci;
-	struct se_device *dev;
 	struct se_lun *lun = container_of(to_config_group(lun_ci),
 			struct se_lun, lun_group);
 	struct se_lun *lun_p;
 	struct se_portal_group *se_tpg;
-	struct se_subsystem_dev *se_dev = container_of(
-		to_config_group(se_dev_ci), struct se_subsystem_dev,
-		se_dev_group);
+	struct se_device *dev =
+		container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
 	struct target_fabric_configfs *tf;
 	int ret;

@@ -755,14 +753,6 @@ static int target_fabric_port_link(
 		return -EEXIST;
 	}

-	dev = se_dev->se_dev_ptr;
-	if (!dev) {
-		pr_err("Unable to locate struct se_device pointer from"
-			" %s\n", config_item_name(se_dev_ci));
-		ret = -ENODEV;
-		goto out;
-	}
-
 	lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
 	if (IS_ERR(lun_p)) {
 		pr_err("core_dev_add_lun() failed\n");
drivers/target/target_core_file.c

@@ -41,7 +41,10 @@

 #include "target_core_file.h"

-static struct se_subsystem_api fileio_template;
+static inline struct fd_dev *FD_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct fd_dev, dev);
+}

 /* fd_attach_hba(): (Part of se_subsystem_api_t template)
  *
@@ -82,7 +85,7 @@ static void fd_detach_hba(struct se_hba *hba)
 	hba->hba_ptr = NULL;
 }

-static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
 {
 	struct fd_dev *fd_dev;
 	struct fd_host *fd_host = hba->hba_ptr;
@@ -97,34 +100,28 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)

 	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);

-	return fd_dev;
+	return &fd_dev->dev;
 }

-/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static struct se_device *fd_create_virtdevice(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	void *p)
+static int fd_configure_device(struct se_device *dev)
 {
-	struct se_device *dev;
-	struct se_dev_limits dev_limits;
-	struct queue_limits *limits;
-	struct fd_dev *fd_dev = p;
-	struct fd_host *fd_host = hba->hba_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	struct fd_host *fd_host = dev->se_hba->hba_ptr;
 	struct file *file;
 	struct inode *inode = NULL;
-	int dev_flags = 0, flags, ret = -EINVAL;
+	int flags, ret = -EINVAL;

-	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+		pr_err("Missing fd_dev_name=\n");
+		return -EINVAL;
+	}

 	/*
 	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
 	 * of pure timestamp updates.
 	 */
 	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

 	/*
 	 * Optionally allow fd_buffered_io=1 to be enabled for people
 	 * who want use the fs buffer cache as an WriteCache mechanism.
@@ -154,22 +151,17 @@ static struct se_device *fd_create_virtdevice(
 	 */
 	inode = file->f_mapping->host;
 	if (S_ISBLK(inode->i_mode)) {
-		struct request_queue *q;
+		struct request_queue *q = bdev_get_queue(inode->i_bdev);
 		unsigned long long dev_size;
-		/*
-		 * Setup the local scope queue_limits from struct request_queue->limits
-		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
-		 */
-		q = bdev_get_queue(inode->i_bdev);
-		limits = &dev_limits.limits;
-		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
-		limits->max_hw_sectors = queue_max_hw_sectors(q);
-		limits->max_sectors = queue_max_sectors(q);
+
+		dev->dev_attrib.hw_block_size =
+			bdev_logical_block_size(inode->i_bdev);
+		dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
+
 		/*
 		 * Determine the number of bytes from i_size_read() minus
 		 * one (1) logical sector from underlying struct block_device
 		 */
-		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
 		dev_size = (i_size_read(file->f_mapping->host) -
 				fd_dev->fd_block_size);

@@ -185,26 +177,18 @@ static struct se_device *fd_create_virtdevice(
 			goto fail;
 		}

-		limits = &dev_limits.limits;
-		limits->logical_block_size = FD_BLOCKSIZE;
-		limits->max_hw_sectors = FD_MAX_SECTORS;
-		limits->max_sectors = FD_MAX_SECTORS;
-		fd_dev->fd_block_size = FD_BLOCKSIZE;
+		dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
+		dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
 	}

-	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
-	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
+	fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;

-	dev = transport_add_device_to_core_hba(hba, &fileio_template,
-				se_dev, dev_flags, fd_dev,
-				&dev_limits, "FILEIO", FD_VERSION);
-	if (!dev)
-		goto fail;
+	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

 	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
 		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
 			" with FDBD_HAS_BUFFERED_IO_WCE\n");
-		dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
+		dev->dev_attrib.emulate_write_cache = 1;
 	}

 	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
@@ -214,22 +198,18 @@ static struct se_device *fd_create_virtdevice(
 		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
 			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

-	return dev;
+	return 0;
 fail:
 	if (fd_dev->fd_file) {
 		filp_close(fd_dev->fd_file, NULL);
 		fd_dev->fd_file = NULL;
 	}
-	return ERR_PTR(ret);
+	return ret;
 }

 /* fd_free_device(): (Part of se_subsystem_api_t template)
  *
  *
  */
-static void fd_free_device(void *p)
+static void fd_free_device(struct se_device *dev)
 {
-	struct fd_dev *fd_dev = p;
+	struct fd_dev *fd_dev = FD_DEV(dev);

 	if (fd_dev->fd_file) {
 		filp_close(fd_dev->fd_file, NULL);
@@ -243,13 +223,12 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
 	u32 sgl_nents)
 {
 	struct se_device *se_dev = cmd->se_dev;
-	struct fd_dev *dev = se_dev->dev_ptr;
+	struct fd_dev *dev = FD_DEV(se_dev);
 	struct file *fd = dev->fd_file;
 	struct scatterlist *sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
-	loff_t pos = (cmd->t_task_lba *
-		      se_dev->se_sub_dev->se_dev_attrib.block_size);
+	loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
 	int ret = 0, i;

 	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
@@ -296,13 +275,12 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
 	u32 sgl_nents)
 {
 	struct se_device *se_dev = cmd->se_dev;
-	struct fd_dev *dev = se_dev->dev_ptr;
+	struct fd_dev *dev = FD_DEV(se_dev);
 	struct file *fd = dev->fd_file;
 	struct scatterlist *sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
-	loff_t pos = (cmd->t_task_lba *
-		      se_dev->se_sub_dev->se_dev_attrib.block_size);
+	loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
 	int ret, i = 0;

 	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
@@ -334,7 +312,7 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
 static int fd_execute_sync_cache(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct fd_dev *fd_dev = dev->dev_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
 	int immed = (cmd->t_task_cdb[1] & 0x2);
 	loff_t start, end;
 	int ret;
@@ -353,7 +331,7 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
 		start = 0;
 		end = LLONG_MAX;
 	} else {
-		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+		start = cmd->t_task_lba * dev->dev_attrib.block_size;
 		if (cmd->data_length)
 			end = start + cmd->data_length;
 		else
@@ -399,11 +377,11 @@ static int fd_execute_rw(struct se_cmd *cmd)
 	 * Allow this to happen independent of WCE=0 setting.
 	 */
 	if (ret > 0 &&
-	    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+	    dev->dev_attrib.emulate_fua_write > 0 &&
 	    (cmd->se_cmd_flags & SCF_FUA)) {
-		struct fd_dev *fd_dev = dev->dev_ptr;
+		struct fd_dev *fd_dev = FD_DEV(dev);
 		loff_t start = cmd->t_task_lba *
-			dev->se_sub_dev->se_dev_attrib.block_size;
+			dev->dev_attrib.block_size;
 		loff_t end = start + cmd->data_length;

 		vfs_fsync_range(fd_dev->fd_file, start, end, 1);
@@ -430,12 +408,10 @@ static match_table_t tokens = {
 	{Opt_err, NULL}
 };

-static ssize_t fd_set_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	const char *page, ssize_t count)
+static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
 {
-	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
 	char *orig, *ptr, *arg_p, *opts;
 	substring_t args[MAX_OPT_ARGS];
 	int ret = 0, arg, token;
@@ -502,24 +478,9 @@ out:
 	return (!ret) ? count : ret;
 }

-static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
-
-	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
-		pr_err("Missing fd_dev_name=\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static ssize_t fd_show_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	char *b)
-{
-	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
 	ssize_t bl = 0;

 	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
@@ -550,7 +511,7 @@ static u32 fd_get_device_type(struct se_device *dev)

 static sector_t fd_get_blocks(struct se_device *dev)
 {
-	struct fd_dev *fd_dev = dev->dev_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
 	struct file *f = fd_dev->fd_file;
 	struct inode *i = f->f_mapping->host;
 	unsigned long long dev_size;
@@ -564,7 +525,7 @@ static sector_t fd_get_blocks(struct se_device *dev)
 	else
 		dev_size = fd_dev->fd_dev_size;

-	return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
+	return div_u64(dev_size, dev->dev_attrib.block_size);
 }

 static struct spc_ops fd_spc_ops = {
@@ -579,15 +540,16 @@ static int fd_parse_cdb(struct se_cmd *cmd)

 static struct se_subsystem_api fileio_template = {
 	.name			= "fileio",
+	.inquiry_prod		= "FILEIO",
+	.inquiry_rev		= FD_VERSION,
 	.owner			= THIS_MODULE,
 	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
 	.attach_hba		= fd_attach_hba,
 	.detach_hba		= fd_detach_hba,
-	.allocate_virtdevice	= fd_allocate_virtdevice,
-	.create_virtdevice	= fd_create_virtdevice,
+	.alloc_device		= fd_alloc_device,
+	.configure_device	= fd_configure_device,
 	.free_device		= fd_free_device,
 	.parse_cdb		= fd_parse_cdb,
-	.check_configfs_dev_params = fd_check_configfs_dev_params,
 	.set_configfs_dev_params = fd_set_configfs_dev_params,
 	.show_configfs_dev_params = fd_show_configfs_dev_params,
 	.get_device_rev		= fd_get_device_rev,
drivers/target/target_core_file.h

@@ -17,6 +17,8 @@
 #define FDBD_HAS_BUFFERED_IO_WCE 0x04

 struct fd_dev {
+	struct se_device dev;
+
 	u32		fbd_flags;
 	unsigned char	fd_dev_name[FD_MAX_DEV_NAME];
 	/* Unique Ramdisk Device ID in Ramdisk HBA */
drivers/target/target_core_hba.c

@@ -113,7 +113,6 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
 		return ERR_PTR(-ENOMEM);
 	}

-	INIT_LIST_HEAD(&hba->hba_dev_list);
 	spin_lock_init(&hba->device_lock);
 	mutex_init(&hba->hba_access_mutex);

@@ -152,8 +151,7 @@ out_free_hba:
 int
 core_delete_hba(struct se_hba *hba)
 {
-	if (!list_empty(&hba->hba_dev_list))
-		dump_stack();
+	WARN_ON(hba->dev_count);

 	hba->transport->detach_hba(hba);

drivers/target/target_core_iblock.c

@@ -50,6 +50,12 @@
 #define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
 #define IBLOCK_BIO_POOL_SIZE	128

+static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct iblock_dev, dev);
+}
+
+
 static struct se_subsystem_api iblock_template;

 static void iblock_bio_done(struct bio *, int);
@@ -70,7 +76,7 @@ static void iblock_detach_hba(struct se_hba *hba)
 {
 }

-static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
 {
 	struct iblock_dev *ib_dev = NULL;

@@ -82,40 +88,28 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)

 	pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);

-	return ib_dev;
+	return &ib_dev->dev;
 }

-static struct se_device *iblock_create_virtdevice(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	void *p)
+static int iblock_configure_device(struct se_device *dev)
 {
-	struct iblock_dev *ib_dev = p;
-	struct se_device *dev;
-	struct se_dev_limits dev_limits;
-	struct block_device *bd = NULL;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 	struct request_queue *q;
-	struct queue_limits *limits;
-	u32 dev_flags = 0;
+	struct block_device *bd = NULL;
 	fmode_t mode;
-	int ret = -EINVAL;
+	int ret = -ENOMEM;

-	if (!ib_dev) {
-		pr_err("Unable to locate struct iblock_dev parameter\n");
-		return ERR_PTR(ret);
+	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+		pr_err("Missing udev_path= parameters for IBLOCK\n");
+		return -EINVAL;
 	}
-	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

 	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
 	if (!ib_dev->ibd_bio_set) {
-		pr_err("IBLOCK: Unable to create bioset()\n");
-		return ERR_PTR(-ENOMEM);
+		pr_err("IBLOCK: Unable to create bioset\n");
+		goto out;
 	}
-	pr_debug("IBLOCK: Created bio_set()\n");
-	/*
-	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
-	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
-	 */
+
 	pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
 			ib_dev->ibd_udev_path);

@@ -126,27 +120,15 @@ static struct se_device *iblock_create_virtdevice(
 	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
 	if (IS_ERR(bd)) {
 		ret = PTR_ERR(bd);
-		goto failed;
+		goto out_free_bioset;
 	}
-	/*
-	 * Setup the local scope queue_limits from struct request_queue->limits
-	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
-	 */
-	q = bdev_get_queue(bd);
-	limits = &dev_limits.limits;
-	limits->logical_block_size = bdev_logical_block_size(bd);
-	limits->max_hw_sectors = UINT_MAX;
-	limits->max_sectors = UINT_MAX;
-	dev_limits.hw_queue_depth = q->nr_requests;
-	dev_limits.queue_depth = q->nr_requests;

 	ib_dev->ibd_bd = bd;
-
-	dev = transport_add_device_to_core_hba(hba,
-			&iblock_template, se_dev, dev_flags, ib_dev,
-			&dev_limits, "IBLOCK", IBLOCK_VERSION);
-	if (!dev)
-		goto failed;
+	q = bdev_get_queue(bd);
+
+	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
+	dev->dev_attrib.hw_max_sectors = UINT_MAX;
+	dev->dev_attrib.hw_queue_depth = q->nr_requests;

 	/*
 	 * Check if the underlying struct block_device request_queue supports
@@ -154,15 +136,16 @@ static struct se_device *iblock_create_virtdevice(
 	 * in ATA and we need to set TPE=1
 	 */
 	if (blk_queue_discard(q)) {
-		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
+		dev->dev_attrib.max_unmap_lba_count =
 			q->limits.max_discard_sectors;
+
 		/*
 		 * Currently hardcoded to 1 in Linux/SCSI code..
 		 */
-		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
-		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
+		dev->dev_attrib.max_unmap_block_desc_count = 1;
+		dev->dev_attrib.unmap_granularity =
 			q->limits.discard_granularity >> 9;
-		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
+		dev->dev_attrib.unmap_granularity_alignment =
 			q->limits.discard_alignment;

 		pr_debug("IBLOCK: BLOCK Discard support available,"
@@ -170,22 +153,19 @@ static struct se_device *iblock_create_virtdevice(
 	}

 	if (blk_queue_nonrot(q))
-		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
+		dev->dev_attrib.is_nonrot = 1;
+	return 0;

-	return dev;
-
-failed:
-	if (ib_dev->ibd_bio_set) {
-		bioset_free(ib_dev->ibd_bio_set);
-		ib_dev->ibd_bio_set = NULL;
-	}
-	ib_dev->ibd_bd = NULL;
-	return ERR_PTR(ret);
+out_free_bioset:
+	bioset_free(ib_dev->ibd_bio_set);
+	ib_dev->ibd_bio_set = NULL;
+out:
+	return ret;
 }

-static void iblock_free_device(void *p)
+static void iblock_free_device(struct se_device *dev)
 {
-	struct iblock_dev *ib_dev = p;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

 	if (ib_dev->ibd_bd != NULL)
 		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
@@ -203,12 +183,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 					bdev_logical_block_size(bd)) - 1);
 	u32 block_size = bdev_logical_block_size(bd);

-	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
+	if (block_size == dev->dev_attrib.block_size)
 		return blocks_long;

 	switch (block_size) {
 	case 4096:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 2048:
 			blocks_long <<= 1;
 			break;
@@ -222,7 +202,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 		}
 		break;
 	case 2048:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 1;
 			break;
@@ -237,7 +217,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 		}
 		break;
 	case 1024:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 2;
 			break;
@@ -252,7 +232,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 		}
 		break;
 	case 512:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 3;
 			break;
@@ -299,7 +279,7 @@ static void iblock_end_io_flush(struct bio *bio, int err)
  */
 static int iblock_execute_sync_cache(struct se_cmd *cmd)
 {
-	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
 	int immed = (cmd->t_task_cdb[1] & 0x2);
 	struct bio *bio;

@@ -322,7 +302,7 @@ static int iblock_execute_sync_cache(struct se_cmd *cmd)
 static int iblock_execute_unmap(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct iblock_dev *ibd = dev->dev_ptr;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 	unsigned char *buf, *ptr = NULL;
 	sector_t lba;
 	int size;
@@ -349,7 +329,7 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
 	else
 		size = bd_dl;

-	if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
 		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
 		ret = -EINVAL;
 		goto err;
@@ -366,7 +346,7 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
 		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
 				(unsigned long long)lba, range);

-		if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
+		if (range > dev->dev_attrib.max_unmap_lba_count) {
 			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
 			ret = -EINVAL;
 			goto err;
@@ -378,7 +358,7 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
 			goto err;
 		}

-		ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
+		ret = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
 				GFP_KERNEL, 0);
 		if (ret < 0) {
 			pr_err("blkdev_issue_discard() failed: %d\n",
@@ -399,10 +379,10 @@ err:

 static int iblock_execute_write_same(struct se_cmd *cmd)
 {
-	struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
 	int ret;

-	ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
+	ret = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
 			spc_get_write_same_sectors(cmd), GFP_KERNEL,
 			0);
 	if (ret < 0) {
@@ -425,11 +405,10 @@ static match_table_t tokens = {
 	{Opt_err, NULL}
 };

-static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
-					       struct se_subsystem_dev *se_dev,
-					       const char *page, ssize_t count)
+static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
 {
-	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 	char *orig, *ptr, *arg_p, *opts;
 	substring_t args[MAX_OPT_ARGS];
 	int ret = 0, token;
@@ -491,43 +470,26 @@ out:
 	return (!ret) ? count : ret;
 }

-static ssize_t iblock_check_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev)
+static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-
-	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
-		pr_err("Missing udev_path= parameters for IBLOCK\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static ssize_t iblock_show_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	char *b)
-{
-	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-	struct block_device *bd = ibd->ibd_bd;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
 	char buf[BDEVNAME_SIZE];
 	ssize_t bl = 0;

 	if (bd)
 		bl += sprintf(b + bl, "iBlock device: %s",
 				bdevname(bd, buf));
-	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
+	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
 		bl += sprintf(b + bl, "  UDEV PATH: %s",
-				ibd->ibd_udev_path);
-	bl += sprintf(b + bl, "  readonly: %d\n", ibd->ibd_readonly);
+				ib_dev->ibd_udev_path);
+	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

 	bl += sprintf(b + bl, "        ");
 	if (bd) {
 		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
 			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
-			"" : (bd->bd_holder == ibd) ?
+			"" : (bd->bd_holder == ib_dev) ?
 			"CLAIMED: IBLOCK" : "CLAIMED: OS");
 	} else {
 		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
@@ -556,7 +518,7 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
 static struct bio *
 iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
 {
-	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
 	struct bio *bio;

 	/*
@@ -611,8 +573,8 @@ static int iblock_execute_rw(struct se_cmd *cmd)
 		 * Force data to disk if we pretend to not have a volatile
 		 * write cache, or the initiator set the Force Unit Access bit.
 		 */
-		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
-		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+		if (dev->dev_attrib.emulate_write_cache == 0 ||
+		    (dev->dev_attrib.emulate_fua_write > 0 &&
 		     (cmd->se_cmd_flags & SCF_FUA)))
 			rw = WRITE_FUA;
 		else
@@ -625,17 +587,17 @@ static int iblock_execute_rw(struct se_cmd *cmd)
 	 * Convert the blocksize advertised to the initiator to the 512 byte
 	 * units unconditionally used by the Linux block layer.
 	 */
-	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
+	if (dev->dev_attrib.block_size == 4096)
 		block_lba = (cmd->t_task_lba << 3);
-	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
+	else if (dev->dev_attrib.block_size == 2048)
 		block_lba = (cmd->t_task_lba << 2);
-	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
+	else if (dev->dev_attrib.block_size == 1024)
 		block_lba = (cmd->t_task_lba << 1);
-	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
+	else if (dev->dev_attrib.block_size == 512)
 		block_lba = cmd->t_task_lba;
 	else {
 		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
-			" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
+			" %u\n", dev->dev_attrib.block_size);
 		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		return -ENOSYS;
 	}
@@ -714,8 +676,8 @@ static u32 iblock_get_device_type(struct se_device *dev)

 static sector_t iblock_get_blocks(struct se_device *dev)
 {
-	struct iblock_dev *ibd = dev->dev_ptr;
-	struct block_device *bd = ibd->ibd_bd;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
 	struct request_queue *q = bdev_get_queue(bd);

 	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
@@ -761,15 +723,16 @@ static int iblock_parse_cdb(struct se_cmd *cmd)

 static struct se_subsystem_api iblock_template = {
 	.name			= "iblock",
+	.inquiry_prod		= "IBLOCK",
+	.inquiry_rev		= IBLOCK_VERSION,
 	.owner			= THIS_MODULE,
 	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
 	.attach_hba		= iblock_attach_hba,
 	.detach_hba		= iblock_detach_hba,
-	.allocate_virtdevice	= iblock_allocate_virtdevice,
-	.create_virtdevice	= iblock_create_virtdevice,
+	.alloc_device		= iblock_alloc_device,
+	.configure_device	= iblock_configure_device,
 	.free_device		= iblock_free_device,
 	.parse_cdb		= iblock_parse_cdb,
-	.check_configfs_dev_params = iblock_check_configfs_dev_params,
 	.set_configfs_dev_params = iblock_set_configfs_dev_params,
 	.show_configfs_dev_params = iblock_show_configfs_dev_params,
 	.get_device_rev		= iblock_get_device_rev,
drivers/target/target_core_iblock.h

@@ -14,6 +14,7 @@ struct iblock_req {
 #define IBDF_HAS_UDEV_PATH		0x01

 struct iblock_dev {
+	struct se_device dev;
 	unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
 	u32	ibd_flags;
 	struct bio_set	*ibd_bio_set;
drivers/target/target_core_internal.h

@@ -20,12 +20,6 @@ int	core_dev_export(struct se_device *, struct se_portal_group *,
 void	core_dev_unexport(struct se_device *, struct se_portal_group *,
 		struct se_lun *);
 int	target_report_luns(struct se_cmd *);
-void	se_release_device_for_hba(struct se_device *);
-void	se_release_vpd_for_dev(struct se_device *);
-int	se_free_virtual_device(struct se_device *, struct se_hba *);
-int	se_dev_check_online(struct se_device *);
-int	se_dev_check_shutdown(struct se_device *);
-void	se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
 int	se_dev_set_task_timeout(struct se_device *, u32);
 int	se_dev_set_max_unmap_lba_count(struct se_device *, u32);
 int	se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
@@ -60,6 +54,9 @@ void	core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
 		struct se_lun_acl *lacl);
 int	core_dev_setup_virtual_lun0(void);
 void	core_dev_release_virtual_lun0(void);
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
+int	target_configure_device(struct se_device *dev);
+void	target_free_device(struct se_device *);

 /* target_core_hba.c */
 struct se_hba *core_alloc_hba(const char *, u32, u32);
@@ -106,9 +103,10 @@ bool	target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
 int	transport_clear_lun_from_sessions(struct se_lun *);
 void	transport_send_task_abort(struct se_cmd *);
 int	target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+void	target_qf_do_work(struct work_struct *work);

 /* target_core_stat.c */
-void	target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+void	target_stat_setup_dev_default_groups(struct se_device *);
 void	target_stat_setup_port_default_groups(struct se_lun *);
 void	target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);

@@ -103,7 +103,7 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
|
||||
spin_unlock(&dev->dev_reservation_lock);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
|
||||
if (!(dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID)) {
|
||||
spin_unlock(&dev->dev_reservation_lock);
|
||||
return 0;
|
||||
}
|
||||
@@ -120,10 +120,10 @@ static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
|
||||
static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
|
||||
{
|
||||
struct se_session *se_sess = cmd->se_sess;
|
||||
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
|
||||
struct se_device *dev = cmd->se_dev;
|
||||
struct t10_pr_registration *pr_reg;
|
||||
struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
|
||||
int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
|
||||
struct t10_reservation *pr_tmpl = &dev->t10_pr;
|
||||
int crh = (dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
|
||||
int conflict = 0;
|
||||
|
||||
if (!crh)
|
||||
@@ -223,10 +223,10 @@ int target_scsi2_reservation_release(struct se_cmd *cmd)
|
||||
goto out_unlock;
|
||||
|
||||
dev->dev_reserved_node_acl = NULL;
|
||||
dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
|
||||
if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
|
||||
dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
|
||||
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
|
||||
dev->dev_res_bin_isid = 0;
|
||||
dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
|
||||
dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
|
||||
}
|
||||
tpg = sess->se_tpg;
|
||||
pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
|
||||
@@ -292,10 +292,10 @@ int target_scsi2_reservation_reserve(struct se_cmd *cmd)
|
||||
}
|
||||
|
||||
dev->dev_reserved_node_acl = sess->se_node_acl;
|
||||
dev->dev_flags |= DF_SPC2_RESERVATIONS;
|
||||
dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS;
|
||||
if (sess->sess_bin_isid != 0) {
|
||||
dev->dev_res_bin_isid = sess->sess_bin_isid;
|
||||
dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
|
||||
dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID;
|
||||
}
|
||||
pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
|
||||
" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
|
||||
@@ -333,7 +333,7 @@ static int core_scsi3_pr_seq_non_holder(
|
||||
/*
|
||||
* A legacy SPC-2 reservation is being held.
|
||||
*/
|
||||
if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
|
||||
if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
|
||||
return core_scsi2_reservation_seq_non_holder(cmd,
|
||||
cdb, pr_reg_type);
|
||||
|
||||
@@ -565,8 +565,8 @@ static int core_scsi3_pr_seq_non_holder(
|
||||
|
||||
static u32 core_scsi3_pr_generation(struct se_device *dev)
|
||||
{
|
||||
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
|
||||
u32 prg;
|
||||
|
||||
/*
|
||||
* PRGeneration field shall contain the value of a 32-bit wrapping
|
||||
* counter mainted by the device server.
|
||||
@@ -577,7 +577,7 @@ static u32 core_scsi3_pr_generation(struct se_device *dev)
|
||||
* See spc4r17 section 6.3.12 READ_KEYS service action
|
||||
*/
|
||||
spin_lock(&dev->dev_reservation_lock);
|
||||
prg = su_dev->t10_pr.pr_generation++;
|
||||
prg = dev->t10_pr.pr_generation++;
|
||||
spin_unlock(&dev->dev_reservation_lock);
|
||||
|
||||
return prg;
|
||||
@@ -596,7 +596,7 @@ static int core_scsi3_pr_reservation_check(
/*
* A legacy SPC-2 reservation is being held.
*/
if (dev->dev_flags & DF_SPC2_RESERVATIONS)
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return core_scsi2_reservation_check(cmd, pr_reg_type);

spin_lock(&dev->dev_reservation_lock);
@@ -636,7 +636,6 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
int all_tg_pt,
int aptpl)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;

pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
@@ -645,7 +644,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
return NULL;
}

pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len,
pr_reg->pr_aptpl_buf = kzalloc(dev->t10_pr.pr_aptpl_buf_len,
GFP_ATOMIC);
if (!pr_reg->pr_aptpl_buf) {
pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
@@ -929,7 +928,7 @@ static int __core_scsi3_check_aptpl_registration(
struct se_dev_entry *deve)
{
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
u16 tpgt;
@@ -996,11 +995,10 @@ int core_scsi3_check_aptpl_registration(
struct se_lun *lun,
struct se_lun_acl *lun_acl)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct se_node_acl *nacl = lun_acl->se_lun_nacl;
struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];

if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;

return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
@@ -1051,10 +1049,9 @@ static void __core_scsi3_add_registration(
int register_type,
int register_move)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;

/*
* Increment PRgeneration counter for struct se_device upon a successful
@@ -1066,7 +1063,7 @@ static void __core_scsi3_add_registration(
* for the REGISTER.
*/
pr_reg->pr_res_generation = (register_move) ?
su_dev->t10_pr.pr_generation++ :
dev->t10_pr.pr_generation++ :
core_scsi3_pr_generation(dev);

spin_lock(&pr_tmpl->registration_lock);
@@ -1135,7 +1132,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
struct se_node_acl *nacl,
unsigned char *isid)
{
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct se_portal_group *tpg;

@@ -1160,7 +1157,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
* for fabric modules (iSCSI) requiring them.
*/
if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids)
if (dev->dev_attrib.enforce_pr_isids)
continue;
}
atomic_inc(&pr_reg->pr_res_holders);
@@ -1274,7 +1271,7 @@ static void __core_scsi3_free_registration(
{
struct target_core_fabric_ops *tfo =
pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
int prf_isid;

@@ -1335,7 +1332,7 @@ void core_scsi3_free_pr_reg_from_nacl(
struct se_device *dev,
struct se_node_acl *nacl)
{
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
/*
* If the passed se_node_acl matches the reservation holder,
@@ -1365,7 +1362,7 @@ void core_scsi3_free_pr_reg_from_nacl(
void core_scsi3_free_all_registrations(
struct se_device *dev)
{
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;

spin_lock(&dev->dev_reservation_lock);
@@ -1899,7 +1896,6 @@ static int __core_scsi3_update_aptpl_buf(
{
struct se_lun *lun;
struct se_portal_group *tpg;
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
unsigned char tmp[512], isid_buf[32];
ssize_t len = 0;
@@ -1917,8 +1913,8 @@ static int __core_scsi3_update_aptpl_buf(
/*
* Walk the registration list..
*/
spin_lock(&su_dev->t10_pr.registration_lock);
list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
spin_lock(&dev->t10_pr.registration_lock);
list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
pr_reg_list) {

tmp[0] = '\0';
@@ -1963,7 +1959,7 @@ static int __core_scsi3_update_aptpl_buf(
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
pr_err("Unable to update renaming"
" APTPL metadata\n");
spin_unlock(&su_dev->t10_pr.registration_lock);
spin_unlock(&dev->t10_pr.registration_lock);
return -EMSGSIZE;
}
len += sprintf(buf+len, "%s", tmp);
@@ -1981,13 +1977,13 @@ static int __core_scsi3_update_aptpl_buf(
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
pr_err("Unable to update renaming"
" APTPL metadata\n");
spin_unlock(&su_dev->t10_pr.registration_lock);
spin_unlock(&dev->t10_pr.registration_lock);
return -EMSGSIZE;
}
len += sprintf(buf+len, "%s", tmp);
reg_count++;
}
spin_unlock(&su_dev->t10_pr.registration_lock);
spin_unlock(&dev->t10_pr.registration_lock);

if (!reg_count)
len += sprintf(buf+len, "No Registrations or Reservations");
@@ -2019,7 +2015,7 @@ static int __core_scsi3_write_aptpl_to_file(
unsigned char *buf,
u32 pr_aptpl_buf_len)
{
struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
struct t10_wwn *wwn = &dev->t10_wwn;
struct file *file;
struct iovec iov[1];
mm_segment_t old_fs;
@@ -2120,7 +2116,7 @@ static int core_scsi3_emulate_pro_register(
struct se_lun *se_lun = cmd->se_lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
/* Used for APTPL metadata w/ UNREGISTER */
unsigned char *pr_aptpl_buf = NULL;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
@@ -2434,7 +2430,7 @@ static int core_scsi3_pro_reserve(
struct se_session *se_sess = cmd->se_sess;
struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_res_holder;
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
int ret, prf_isid;

@@ -2667,7 +2663,7 @@ static int core_scsi3_emulate_pro_release(
struct se_session *se_sess = cmd->se_sess;
struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
int ret, all_reg = 0;

if (!se_sess || !se_lun) {
@@ -2836,7 +2832,7 @@ static int core_scsi3_emulate_pro_clear(
struct se_device *dev = cmd->se_dev;
struct se_node_acl *pr_reg_nacl;
struct se_session *se_sess = cmd->se_sess;
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
u32 pr_res_mapped_lun = 0;
int calling_it_nexus = 0;
@@ -3006,7 +3002,7 @@ static int core_scsi3_pro_preempt(
struct se_session *se_sess = cmd->se_sess;
LIST_HEAD(preempt_and_abort_list);
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
u32 pr_res_mapped_lun = 0;
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
int prh_type = 0, prh_scope = 0, ret;
@@ -3358,7 +3354,7 @@ static int core_scsi3_emulate_pro_register_and_move(
struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
unsigned char *initiator_str;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
@@ -3823,7 +3819,7 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
* initiator or service action and shall terminate with a RESERVATION
* CONFLICT status.
*/
if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
@@ -3959,8 +3955,7 @@ out:
*/
static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
{
struct se_device *se_dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct se_device *dev = cmd->se_dev;
struct t10_pr_registration *pr_reg;
unsigned char *buf;
u32 add_len = 0, off = 8;
@@ -3973,13 +3968,13 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
}

buf = transport_kmap_data_sg(cmd);
buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
buf[3] = (dev->t10_pr.pr_generation & 0xff);

spin_lock(&su_dev->t10_pr.registration_lock);
list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
spin_lock(&dev->t10_pr.registration_lock);
list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
pr_reg_list) {
/*
* Check for overflow of 8byte PRI READ_KEYS payload and
@@ -3999,7 +3994,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)

add_len += 8;
}
spin_unlock(&su_dev->t10_pr.registration_lock);
spin_unlock(&dev->t10_pr.registration_lock);

buf[4] = ((add_len >> 24) & 0xff);
buf[5] = ((add_len >> 16) & 0xff);
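The four shift-and-mask stores above place the 32-bit PRGeneration into buf[0..3] most-significant byte first. The patch keeps the open-coded form; purely as a sketch, the same encoding can be expressed with the kernel's put_unaligned_be32() helper (the wrapper name here is illustrative, not from the tree):

/* Sketch: big-endian 32-bit store equivalent to the shifts above. */
#include <asm/unaligned.h>
#include <linux/types.h>

static void encode_be32(unsigned char *buf, u32 val)
{
	put_unaligned_be32(val, &buf[0]);	/* buf[0] = MSB ... buf[3] = LSB */
}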
@@ -4018,8 +4013,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
*/
static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
{
struct se_device *se_dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct se_device *dev = cmd->se_dev;
struct t10_pr_registration *pr_reg;
unsigned char *buf;
u64 pr_res_key;
@@ -4033,13 +4027,13 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
}

buf = transport_kmap_data_sg(cmd);
buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
buf[3] = (dev->t10_pr.pr_generation & 0xff);

spin_lock(&se_dev->dev_reservation_lock);
pr_reg = se_dev->dev_pr_res_holder;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
if (pr_reg) {
/*
* Set the hardcoded Additional Length
@@ -4090,7 +4084,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
}

err:
spin_unlock(&se_dev->dev_reservation_lock);
spin_unlock(&dev->dev_reservation_lock);
transport_kunmap_data_sg(cmd);

return 0;
@@ -4104,7 +4098,7 @@ err:
static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
u16 add_len = 8; /* Hardcoded to 8. */

@@ -4159,12 +4153,11 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
*/
static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
{
struct se_device *se_dev = cmd->se_dev;
struct se_device *dev = cmd->se_dev;
struct se_node_acl *se_nacl;
struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
u32 off = 8; /* off into first Full Status descriptor */
@@ -4179,10 +4172,10 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)

buf = transport_kmap_data_sg(cmd);

buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
buf[3] = (dev->t10_pr.pr_generation & 0xff);

spin_lock(&pr_tmpl->registration_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
@@ -4316,7 +4309,7 @@ int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
* initiator or service action and shall terminate with a RESERVATION
* CONFLICT status.
*/
if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
@@ -4363,30 +4356,25 @@ static int core_pt_seq_non_holder(
return 0;
}

int core_setup_reservations(struct se_device *dev, int force_pt)
void core_setup_reservations(struct se_device *dev)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_reservation *rest = &su_dev->t10_pr;
struct t10_reservation *rest = &dev->t10_pr;

/*
* If this device is from Target_Core_Mod/pSCSI, use the reservations
* of the Underlying SCSI hardware. In Linux/SCSI terms, this can
* cause a problem because libata and some SATA RAID HBAs appear
* under Linux/SCSI, but to emulate reservations themselves.
*/
if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
!(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) {
if ((dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) ||
(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV &&
!dev->dev_attrib.emulate_reservations)) {
rest->res_type = SPC_PASSTHROUGH;
rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
pr_debug("%s: Using SPC_PASSTHROUGH, no reservation"
" emulation\n", dev->transport->name);
return 0;
}
/*
* If SPC-3 or above is reported by real or emulated struct se_device,
* use emulated Persistent Reservations.
*/
if (dev->transport->get_device_rev(dev) >= SCSI_3) {
} else if (dev->transport->get_device_rev(dev) >= SCSI_3) {
rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
@@ -4400,6 +4388,4 @@ int core_setup_reservations(struct se_device *dev, int force_pt)
pr_debug("%s: Using SPC2_RESERVATIONS emulation\n",
dev->transport->name);
}

return 0;
}
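core_setup_reservations() above now picks the reservation behavior once at device setup by tagging res_type and filling a small ops table that later code calls through. A hedged sketch of that dispatch pattern, with stand-in types and stub handlers rather than the kernel's:

/* Sketch: select reservation ops once, call through pointers later. */
struct res_ops {
	int (*reservation_check)(void *cmd);
	int (*seq_non_holder)(void *cmd);
};

static int pt_check(void *cmd) { return 0; }	/* passthrough stub */
static int pt_seq(void *cmd) { return 0; }
static int spc3_check(void *cmd) { return 1; }	/* emulated PR stub */
static int spc3_seq(void *cmd) { return 1; }

static void setup_res_ops(struct res_ops *ops, int emulate_spc3)
{
	if (emulate_spc3) {
		ops->reservation_check = spc3_check;
		ops->seq_non_holder = spc3_seq;
	} else {
		ops->reservation_check = pt_check;
		ops->seq_non_holder = pt_seq;
	}
}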
@@ -63,6 +63,6 @@ extern unsigned char *core_scsi3_pr_dump_type(int);

extern int target_scsi3_emulate_pr_in(struct se_cmd *);
extern int target_scsi3_emulate_pr_out(struct se_cmd *);
extern int core_setup_reservations(struct se_device *, int);
extern void core_setup_reservations(struct se_device *);

#endif /* TARGET_CORE_PR_H */

File diff suppressed because it is too large
@@ -37,6 +37,7 @@ struct pscsi_plugin_task {
#define PDF_HAS_VIRT_HOST_ID 0x20

struct pscsi_dev_virt {
struct se_device dev;
int pdv_flags;
int pdv_host_id;
int pdv_channel_id;
@@ -44,7 +45,6 @@ struct pscsi_dev_virt {
int pdv_lun_id;
struct block_device *pdv_bd;
struct scsi_device *pdv_sd;
struct se_hba *pdv_se_hba;
} ____cacheline_aligned;

typedef enum phv_modes {
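The pscsi_dev_virt hunk above shows the commit's central move: struct se_device is now embedded in the backend's own structure instead of being reached through separate pointers, the same scheme used for inode allocation. A sketch of the pattern with illustrative names (generic_dev and backend_dev are not from the tree):

/* Sketch: embed the generic object in the driver object, then
 * recover the container with container_of() instead of chasing
 * a separately allocated pointer. */
#include <linux/kernel.h>

struct generic_dev {
	int id;
};

struct backend_dev {
	struct generic_dev dev;		/* embedded: one allocation */
	int backend_private;
};

static inline struct backend_dev *to_backend(struct generic_dev *d)
{
	return container_of(d, struct backend_dev, dev);
}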
@@ -41,7 +41,10 @@

#include "target_core_rd.h"

static struct se_subsystem_api rd_mcp_template;
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
return container_of(dev, struct rd_dev, dev);
}

/* rd_attach_hba(): (Part of se_subsystem_api_t template)
*
@@ -196,7 +199,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
return 0;
}

static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
struct rd_dev *rd_dev;
struct rd_host *rd_host = hba->hba_ptr;
@@ -209,39 +212,27 @@ static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)

rd_dev->rd_host = rd_host;

return rd_dev;
return &rd_dev->dev;
}

static struct se_device *rd_create_virtdevice(struct se_hba *hba,
struct se_subsystem_dev *se_dev, void *p)
static int rd_configure_device(struct se_device *dev)
{
struct se_device *dev;
struct se_dev_limits dev_limits;
struct rd_dev *rd_dev = p;
struct rd_host *rd_host = hba->hba_ptr;
int dev_flags = 0, ret;
char prod[16], rev[4];
struct rd_dev *rd_dev = RD_DEV(dev);
struct rd_host *rd_host = dev->se_hba->hba_ptr;
int ret;

memset(&dev_limits, 0, sizeof(struct se_dev_limits));
if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
pr_debug("Missing rd_pages= parameter\n");
return -EINVAL;
}

ret = rd_build_device_space(rd_dev);
if (ret < 0)
goto fail;

snprintf(prod, 16, "RAMDISK-MCP");
snprintf(rev, 4, "%s", RD_MCP_VERSION);

dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
dev_limits.limits.max_hw_sectors = UINT_MAX;
dev_limits.limits.max_sectors = UINT_MAX;
dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;

dev = transport_add_device_to_core_hba(hba,
&rd_mcp_template, se_dev, dev_flags, rd_dev,
&dev_limits, prod, rev);
if (!dev)
goto fail;
dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
dev->dev_attrib.hw_max_sectors = UINT_MAX;
dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

@@ -251,16 +242,16 @@ static struct se_device *rd_create_virtdevice(struct se_hba *hba,
rd_dev->sg_table_count,
(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

return dev;
return 0;

fail:
rd_release_device_space(rd_dev);
return ERR_PTR(ret);
return ret;
}

static void rd_free_device(void *p)
static void rd_free_device(struct se_device *dev)
{
struct rd_dev *rd_dev = p;
struct rd_dev *rd_dev = RD_DEV(dev);

rd_release_device_space(rd_dev);
kfree(rd_dev);
@@ -290,7 +281,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
u32 sgl_nents = cmd->t_data_nents;
enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = se_dev->dev_ptr;
struct rd_dev *dev = RD_DEV(se_dev);
struct rd_dev_sg_table *table;
struct scatterlist *rd_sg;
struct sg_mapping_iter m;
@@ -300,7 +291,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
u32 src_len;
u64 tmp;

tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
rd_offset = do_div(tmp, PAGE_SIZE);
rd_page = tmp;
rd_size = cmd->data_length;
@@ -378,13 +369,10 @@ static match_table_t tokens = {
{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
const char *page,
ssize_t count)
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
struct rd_dev *rd_dev = RD_DEV(dev);
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
@@ -417,24 +405,10 @@ static ssize_t rd_set_configfs_dev_params(
return (!ret) ? count : ret;
}

static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
struct rd_dev *rd_dev = RD_DEV(dev);

if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
pr_debug("Missing rd_pages= parameter\n");
return -EINVAL;
}

return 0;
}

static ssize_t rd_show_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
char *b)
{
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
rd_dev->rd_dev_id);
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
@@ -455,9 +429,10 @@ static u32 rd_get_device_type(struct se_device *dev)

static sector_t rd_get_blocks(struct se_device *dev)
{
struct rd_dev *rd_dev = dev->dev_ptr;
struct rd_dev *rd_dev = RD_DEV(dev);

unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
dev->se_sub_dev->se_dev_attrib.block_size) - 1;
dev->dev_attrib.block_size) - 1;

return blocks_long;
}
@@ -473,14 +448,15 @@ static int rd_parse_cdb(struct se_cmd *cmd)

static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.inquiry_prod = "RAMDISK-MCP",
.inquiry_rev = RD_MCP_VERSION,
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
.attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba,
.allocate_virtdevice = rd_allocate_virtdevice,
.create_virtdevice = rd_create_virtdevice,
.alloc_device = rd_alloc_device,
.configure_device = rd_configure_device,
.free_device = rd_free_device,
.parse_cdb = rd_parse_cdb,
.check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
.get_device_rev = rd_get_device_rev,
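rd_execute_rw() above turns the command's LBA into a page index plus an intra-page offset with do_div(), which divides the u64 in place and returns the remainder. A self-contained sketch of that arithmetic (the helper name is illustrative):

/* Sketch: LBA -> (page, offset) for a page-backed ramdisk. */
#include <asm/div64.h>
#include <linux/mm.h>		/* PAGE_SIZE */

static void lba_to_page(unsigned long long lba, u32 block_size,
			u32 *page, u32 *offset)
{
	u64 tmp = lba * block_size;	/* absolute byte offset */

	*offset = do_div(tmp, PAGE_SIZE);	/* remainder: offset in page */
	*page = tmp;				/* quotient: page index */
}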
@@ -24,6 +24,7 @@ struct rd_dev_sg_table {
#define RDF_HAS_PAGE_COUNT 0x01

struct rd_dev {
struct se_device dev;
u32 rd_flags;
/* Unique Ramdisk Device ID in Ramdisk HBA */
u32 rd_dev_id;

@@ -54,10 +54,10 @@ static int sbc_emulate_readcapacity(struct se_cmd *cmd)
buf[1] = (blocks >> 16) & 0xff;
buf[2] = (blocks >> 8) & 0xff;
buf[3] = blocks & 0xff;
buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
buf[7] = dev->dev_attrib.block_size & 0xff;

rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
@@ -85,15 +85,15 @@ static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
buf[5] = (blocks >> 16) & 0xff;
buf[6] = (blocks >> 8) & 0xff;
buf[7] = blocks & 0xff;
buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
buf[11] = dev->dev_attrib.block_size & 0xff;
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
buf[14] = 0x80;

rbuf = transport_kmap_data_sg(cmd);
@@ -143,7 +143,7 @@ static int sbc_emulate_noop(struct se_cmd *cmd)

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
return cmd->se_dev->dev_attrib.block_size * sectors;
}

static int sbc_check_valid_sectors(struct se_cmd *cmd)
@@ -152,7 +152,7 @@ static int sbc_check_valid_sectors(struct se_cmd *cmd)
unsigned long long end_lba;
u32 sectors;

sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;
sectors = cmd->data_length / dev->dev_attrib.block_size;
end_lba = dev->transport->get_blocks(dev) + 1;

if (cmd->t_task_lba + sectors > end_lba) {
@@ -315,7 +315,6 @@ out:

int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
{
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
unsigned int size;
@@ -562,18 +561,18 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
unsigned long long end_lba;

if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
if (sectors > dev->dev_attrib.fabric_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds fabric_max_sectors:"
" %u\n", cdb[0], sectors,
su_dev->se_dev_attrib.fabric_max_sectors);
dev->dev_attrib.fabric_max_sectors);
goto out_invalid_cdb_field;
}
if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
if (sectors > dev->dev_attrib.hw_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds backend hw_max_sectors:"
" %u\n", cdb[0], sectors,
su_dev->se_dev_attrib.hw_max_sectors);
dev->dev_attrib.hw_max_sectors);
goto out_invalid_cdb_field;
}
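The readcapacity hunks above fill the payload byte by byte: the block count big-endian first, then the logical block size. As a sketch only, the READ CAPACITY (10) layout can be written with put_unaligned_be32() instead of the open-coded shifts (the helper name is illustrative):

/* Sketch: READ CAPACITY (10) payload, both fields big-endian. */
#include <asm/unaligned.h>
#include <linux/types.h>

static void fill_readcap10(unsigned char *buf, u32 last_lba, u32 block_size)
{
	put_unaligned_be32(last_lba, &buf[0]);		/* returned LBA */
	put_unaligned_be32(block_size, &buf[4]);	/* block length */
}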
@@ -95,14 +95,14 @@ static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
/*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
if (dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
spc_fill_alua_data(lun->lun_sep, buf);

buf[7] = 0x2; /* CmdQue=1 */

snprintf(&buf[8], 8, "LIO-ORG");
snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
snprintf(&buf[16], 16, "%s", dev->t10_wwn.model);
snprintf(&buf[32], 4, "%s", dev->t10_wwn.revision);
buf[4] = 31; /* Set additional length to 31 */

return 0;
@@ -114,15 +114,13 @@ static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
struct se_device *dev = cmd->se_dev;
u16 len = 0;

if (dev->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
u32 unit_serial_len;

unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
unit_serial_len = strlen(dev->t10_wwn.unit_serial);
unit_serial_len++; /* For NULL Terminator */

len += sprintf(&buf[4], "%s",
dev->se_sub_dev->t10_wwn.unit_serial);
len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
@@ -132,7 +130,7 @@ static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
unsigned char *buf)
{
unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
unsigned char *p = &dev->t10_wwn.unit_serial[0];
int cnt;
bool next = true;

@@ -173,7 +171,7 @@ static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
struct t10_alua_lu_gp_member *lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
unsigned char *prod = &dev->t10_wwn.model[0];
u32 prod_len;
u32 unit_serial_len, off = 0;
u16 len = 0, id_len;
@@ -188,7 +186,7 @@ static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
* value in order to return the NAA id.
*/
if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
goto check_t10_vend_desc;

/* CODE SET == Binary */
@@ -236,14 +234,12 @@ check_t10_vend_desc:
prod_len += strlen(prod);
prod_len++; /* For : */

if (dev->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
unit_serial_len =
strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
unit_serial_len++; /* For NULL Terminator */

id_len += sprintf(&buf[off+12], "%s:%s", prod,
&dev->se_sub_dev->t10_wwn.unit_serial[0]);
&dev->t10_wwn.unit_serial[0]);
}
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
@@ -298,8 +294,7 @@ check_t10_vend_desc:
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
if (dev->se_sub_dev->t10_alua.alua_type !=
SPC3_ALUA_EMULATED)
if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
goto check_scsi_name;

tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
@@ -422,7 +417,7 @@ static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
buf[5] = 0x07;

/* If WriteCache emulation is enabled, set V_SUP */
if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
if (cmd->se_dev->dev_attrib.emulate_write_cache > 0)
buf[6] = 0x01;
return 0;
}
@@ -439,7 +434,7 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
* emulate_tpu=1 or emulate_tpws=1 we will be expect a
* different page length for Thin Provisioning.
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
have_tp = 1;

buf[0] = dev->transport->get_device_type(dev);
@@ -456,14 +451,14 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
*/
max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors,
dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
max_sectors = min(dev->dev_attrib.fabric_max_sectors,
dev->dev_attrib.hw_max_sectors);
put_unaligned_be32(max_sectors, &buf[8]);

/*
* Set OPTIMAL TRANSFER LENGTH
*/
put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);

/*
* Exit now if we don't support TP.
@@ -474,25 +469,25 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM UNMAP LBA COUNT
*/
put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);

/*
* Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
*/
put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
&buf[24]);

/*
* Set OPTIMAL UNMAP GRANULARITY
*/
put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);

/*
* UNMAP GRANULARITY ALIGNMENT
*/
put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
&buf[32]);
if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
if (dev->dev_attrib.unmap_granularity_alignment != 0)
buf[32] |= 0x80; /* Set the UGAVALID bit */

return 0;
@@ -505,7 +500,7 @@ static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)

buf[0] = dev->transport->get_device_type(dev);
buf[3] = 0x3c;
buf[5] = dev->se_sub_dev->se_dev_attrib.is_nonrot ? 1 : 0;
buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;

return 0;
}
@@ -546,7 +541,7 @@ static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* the UNMAP command (see 5.25). A TPU bit set to zero indicates
* that the device server does not support the UNMAP command.
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
if (dev->dev_attrib.emulate_tpu != 0)
buf[5] = 0x80;

/*
@@ -555,7 +550,7 @@ static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* A TPWS bit set to zero indicates that the device server does not
* support the use of the WRITE SAME (16) command to unmap LBAs.
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
if (dev->dev_attrib.emulate_tpws != 0)
buf[5] |= 0x40;

return 0;
@@ -586,8 +581,7 @@ static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
* Registered Extended LUN WWN has been set via ConfigFS
* during device creation/restart.
*/
if (cmd->se_dev->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
buf[3] = ARRAY_SIZE(evpd_handlers);
for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
buf[p + 4] = evpd_handlers[p].page;
@@ -690,7 +684,7 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
* command sequence order shall be explicitly handled by the application client
* through the selection of appropriate ommands and task attributes.
*/
p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
/*
* From spc4r17, section 7.4.6 Control mode Page
*
@@ -720,8 +714,8 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
* for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
* to the number of commands completed with one of those status codes.
*/
p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
(dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
(dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
/*
* From spc4r17, section 7.4.6 Control mode Page
*
@@ -734,7 +728,7 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
p[8] = 0xff;
p[9] = 0xff;
p[11] = 30;
@@ -746,7 +740,7 @@ static int spc_modesense_caching(struct se_device *dev, unsigned char *p)
{
p[0] = 0x08;
p[1] = 0x12;
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
if (dev->dev_attrib.emulate_write_cache > 0)
p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */

@@ -826,8 +820,8 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
spc_modesense_write_protect(&buf[3], type);

if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
if ((dev->dev_attrib.emulate_write_cache > 0) &&
(dev->dev_attrib.emulate_fua_write > 0))
spc_modesense_dpofua(&buf[3], type);
} else {
offset -= 1;
@@ -839,8 +833,8 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
spc_modesense_write_protect(&buf[2], type);

if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
if ((dev->dev_attrib.emulate_write_cache > 0) &&
(dev->dev_attrib.emulate_fua_write > 0))
spc_modesense_dpofua(&buf[2], type);
}

@@ -923,7 +917,6 @@ static int spc_emulate_testunitready(struct se_cmd *cmd)
int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
struct se_device *dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
unsigned char *cdb = cmd->t_task_cdb;

switch (cdb[0]) {
@@ -946,12 +939,12 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
*size = (cdb[7] << 8) + cdb[8];
break;
case PERSISTENT_RESERVE_IN:
if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
if (dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
cmd->execute_cmd = target_scsi3_emulate_pr_in;
*size = (cdb[7] << 8) + cdb[8];
break;
case PERSISTENT_RESERVE_OUT:
if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
if (dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
cmd->execute_cmd = target_scsi3_emulate_pr_out;
*size = (cdb[7] << 8) + cdb[8];
break;
@@ -962,7 +955,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
else
*size = cmd->data_length;

if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
if (dev->t10_pr.res_type != SPC_PASSTHROUGH)
cmd->execute_cmd = target_scsi2_reservation_release;
break;
case RESERVE:
@@ -983,7 +976,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
* is running in SPC_PASSTHROUGH, and wants reservations
* emulation disabled.
*/
if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
if (dev->t10_pr.res_type != SPC_PASSTHROUGH)
cmd->execute_cmd = target_scsi2_reservation_reserve;
break;
case REQUEST_SENSE:
@@ -1040,7 +1033,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
* Check for emulated MI_REPORT_TARGET_PGS
*/
if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
cmd->execute_cmd =
target_emulate_report_target_port_groups;
}
@@ -1059,7 +1052,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
* Check for emulated MO_SET_TARGET_PGS.
*/
if (cdb[1] == MO_SET_TARGET_PGS &&
su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
cmd->execute_cmd =
target_emulate_set_target_port_groups;
}
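Throughout spc_parse_cdb() above, the opcode selects an execute_cmd handler, gated by the device's reservation or ALUA emulation mode now read straight from se_device. A stand-in sketch of that dispatch shape (the opcode values are the standard SPC ones; the handlers are stubs, not the kernel's):

/* Sketch: opcode dispatch gated by an emulation-mode flag. */
typedef int (*execute_fn)(void *cmd);

static int pr_in_stub(void *cmd) { return 0; }
static int pr_out_stub(void *cmd) { return 0; }

static execute_fn pick_handler(unsigned char opcode, int spc3_pr)
{
	switch (opcode) {
	case 0x5e:	/* PERSISTENT RESERVE IN */
		return spc3_pr ? pr_in_stub : NULL;
	case 0x5f:	/* PERSISTENT RESERVE OUT */
		return spc3_pr ? pr_out_stub : NULL;
	default:
		return NULL;	/* caller falls back to generic handling */
	}
}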
Some files were not shown because too many files have changed in this diff