Commit 34a9d2c3 authored by Linus Torvalds

Merge branch '3.2-rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

* '3.2-rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (25 commits)
  iscsi-target: Fix hex2bin warn_unused compile message
  target: Don't return an error if disabling unsupported features
  target/rd: fix or rewrite the copy routine
  target/rd: simplify the page/offset computation
  target: remove the unused se_dev_list
  target/file: walk properly over sg list
  target: remove unused struct fields
  target: Fix page length in emulated INQUIRY VPD page 86h
  target: Handle 0 correctly in transport_get_sectors_6()
  target: Don't return an error status for 0-length READ and WRITE
  iscsi-target: Use kmemdup rather than duplicating its implementation
  iscsi-target: Add missing F_BIT for iscsi_tm_rsp
  iscsi-target: Fix residual count handling + remove iscsi_cmd->residual_count
  target: Reject SCSI data overflow for fabrics using transport_generic_map_mem_to_cmd
  target: remove the unused t_task_pt_sgl and t_task_pt_sgl_num se_cmd fields
  target: remove the t_tasks_bidi se_cmd field
  target: remove the t_tasks_fua se_cmd field
  target: remove the se_ordered_node se_cmd field
  target: remove the se_obj_ptr and se_orig_obj_ptr se_cmd fields
  target: Drop config_item_name usage in fabric TFO->free_wwn()
  ...
parents a694ad94 ddca8f3e
......@@ -614,13 +614,12 @@ int iscsit_add_reject(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
-cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
-memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
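These two hunks (and the login hunk further down) are the kmemdup conversion from the shortlog: kmemdup() from <linux/string.h> allocates and copies in one step, so the separate memcpy() disappears. A minimal sketch of the pattern, using hypothetical helper names rather than the driver's own functions:

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Before: open-coded allocate-then-copy. */
	static void *dup_pdu_old(const void *buf, size_t len)
	{
		void *p = kzalloc(len, GFP_KERNEL);

		if (!p)
			return NULL;
		memcpy(p, buf, len);
		return p;
	}

	/* After: kmemdup() performs the allocation and the copy in one call. */
	static void *dup_pdu_new(const void *buf, size_t len)
	{
		return kmemdup(buf, len, GFP_KERNEL);
	}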
......@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
-cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
-memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
if (add_to_conn) {
spin_lock_bh(&conn->cmd_lock);
......@@ -1017,11 +1015,6 @@ done:
" non-existent or non-exported iSCSI LUN:"
" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
}
-if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
-return iscsit_add_reject_from_cmd(
-ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-1, 1, buf, cmd);
send_check_condition = 1;
goto attach_cmd;
}
......@@ -1044,6 +1037,8 @@ done:
*/
send_check_condition = 1;
} else {
+cmd->data_length = cmd->se_cmd.data_length;
if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
......@@ -1123,7 +1118,7 @@ attach_cmd:
* the backend memory allocation.
*/
ret = transport_generic_new_cmd(&cmd->se_cmd);
-if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+if (ret < 0) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
dump_immediate_data = 1;
goto after_immediate_data;
......@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
-(se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+(se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
dump_unsolicited_data = 1;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
......@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
-hdr->residual_count = cpu_to_be32(cmd->residual_count);
+hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
-hdr->residual_count = cpu_to_be32(cmd->residual_count);
+hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
}
hton24(hdr->dlength, datain.length);
......@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
-hdr->residual_count = cpu_to_be32(cmd->residual_count);
+hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
-hdr->residual_count = cpu_to_be32(cmd->residual_count);
+hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
hdr->response = cmd->iscsi_response;
hdr->cmd_status = cmd->se_cmd.scsi_status;
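Both residual hunks correspond to the shortlog entry that drops iscsi_cmd->residual_count in favour of the value already tracked in struct se_cmd. As background, the residual reported in Data-In and SCSI Response PDUs is the difference between the initiator's expected transfer length and what the command actually moves, with the overflow/underflow flag saying which side is larger. A hedged sketch of that computation (hypothetical helper, not the target-core code):

	#include <linux/types.h>

	/* Hypothetical helper illustrating how a residual count is derived. */
	static u32 calc_residual(u32 expected_len, u32 actual_len, bool *overflow)
	{
		if (actual_len > expected_len) {
			*overflow = true;	/* CDB asks for more than the initiator allocated */
			return actual_len - expected_len;
		}
		*overflow = false;		/* underflow, or exact fit with residual 0 */
		return expected_len - actual_len;
	}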
......@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
hdr = (struct iscsi_tm_rsp *) cmd->pdu;
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+hdr->flags = ISCSI_FLAG_CMD_FINAL;
hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
hdr->itt = cpu_to_be32(cmd->init_task_tag);
cmd->stat_sn = conn->stat_sn++;
......
......@@ -30,9 +30,11 @@
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
-int j = DIV_ROUND_UP(len, 2);
+int j = DIV_ROUND_UP(len, 2), rc;
-hex2bin(dst, src, j);
+rc = hex2bin(dst, src, j);
+if (rc < 0)
+pr_debug("CHAP string contains non hex digit symbols\n");
dst[j] = '\0';
return j;
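This is the warn_unused fix from the top of the shortlog: hex2bin() is declared __must_check and returns 0 on success or a negative value when the source contains a non-hex character, so the result now gets checked. A small sketch of a caller honouring that contract (hypothetical function name):

	#include <linux/kernel.h>

	/* Hypothetical caller: convert a hex string, rejecting malformed input. */
	static int parse_hex_key(u8 *dst, const char *src, int srclen)
	{
		int binlen = DIV_ROUND_UP(srclen, 2);

		if (hex2bin(dst, src, binlen) < 0)
			return -EINVAL;		/* src contained a non-hex digit */
		return binlen;
	}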
......
......@@ -398,7 +398,6 @@ struct iscsi_cmd {
u32 pdu_send_order;
/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
u32 pdu_start;
-u32 residual_count;
/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
u32 seq_send_order;
/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
......@@ -535,7 +534,6 @@ struct iscsi_conn {
atomic_t connection_exit;
atomic_t connection_recovery;
atomic_t connection_reinstatement;
-atomic_t connection_wait;
atomic_t connection_wait_rcfr;
atomic_t sleep_on_conn_wait_comp;
atomic_t transport_failed;
......@@ -643,7 +641,6 @@ struct iscsi_session {
atomic_t session_reinstatement;
atomic_t session_stop_active;
atomic_t sleep_on_sess_wait_comp;
-atomic_t transport_wait_cmds;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;
......
......@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
* handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
-if (se_cmd->se_cmd_flags &
-SCF_SCSI_RESERVATION_CONFLICT) {
+if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
......
......@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Could not allocate memory for session\n");
-return -1;
+return -ENOMEM;
}
iscsi_login_set_conn_values(sess, conn, pdu->cid);
......@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
pr_err("idr_pre_get() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
-return -1;
+kfree(sess);
+return -ENOMEM;
}
spin_lock(&sess_idr_lock);
idr_get_new(&sess_idr, NULL, &sess->session_index);
......@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n");
-return -1;
+kfree(sess);
+return -ENOMEM;
}
sess->se_sess = transport_init_session();
-if (!sess->se_sess) {
+if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
-return -1;
+kfree(sess);
+return -ENOMEM;
}
return 0;
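The !sess->se_sess test becomes IS_ERR() because transport_init_session() now reports failure through the kernel's ERR_PTR convention instead of returning NULL (and each error path also frees the partially set-up session). A generic sketch of that convention, with hypothetical names:

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo { int dummy; };

	/* Failure is encoded into the returned pointer via ERR_PTR(). */
	static struct foo *foo_create(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return ERR_PTR(-ENOMEM);
		return f;
	}

	static int foo_setup(void)
	{
		struct foo *f = foo_create();

		if (IS_ERR(f))
			return PTR_ERR(f);	/* recover the negative errno */
		/* ... use f ... */
		return 0;
	}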
......
......@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
return NULL;
}
-login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
if (!login->req) {
pr_err("Unable to allocate memory for Login Request.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
goto out;
}
-memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
if (!login->req_buf) {
......
......@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
&tl_cmd->tl_sense_buf[0]);
-/*
-* Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
-*/
if (scsi_bidi_cmnd(sc))
-se_cmd->t_tasks_bidi = 1;
+se_cmd->se_cmd_flags |= SCF_BIDI;
/*
* Locate the struct se_lun pointer and attach it to struct se_cmd
*/
......@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
* Allocate the necessary tasks to complete the received CDB+data
*/
ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
-if (ret == -ENOMEM) {
-/* Out of Resources */
-return PYX_TRANSPORT_LU_COMM_FAILURE;
-} else if (ret == -EINVAL) {
-/*
-* Handle case for SAM_STAT_RESERVATION_CONFLICT
-*/
-if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
-return PYX_TRANSPORT_RESERVATION_CONFLICT;
-/*
-* Otherwise, return SAM_STAT_CHECK_CONDITION and return
-* sense data.
-*/
-return PYX_TRANSPORT_USE_SENSE_REASON;
-}
+if (ret != 0)
+return ret;
/*
* For BIDI commands, pass in the extra READ buffer
* to transport_generic_map_mem_to_cmd() below..
*/
-if (se_cmd->t_tasks_bidi) {
+if (se_cmd->se_cmd_flags & SCF_BIDI) {
struct scsi_data_buffer *sdb = scsi_in(sc);
sgl_bidi = sdb->table.sgl;
......@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
}
/* Tell the core about our preallocated memory */
-ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
-if (ret < 0)
-return PYX_TRANSPORT_LU_COMM_FAILURE;
-return 0;
}
/*
......@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
-int host_no = tl_hba->sh->host_no;
+pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+" SAS Address: %s at Linux/SCSI Host ID: %d\n",
+tl_hba->tl_wwn_address, tl_hba->sh->host_no);
/*
* Call device_unregister() on the original tl_hba->dev.
* tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
* release *tl_hba;
*/
device_unregister(&tl_hba->dev);
pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
" SAS Address: %s at Linux/SCSI Host ID: %d\n",
config_item_name(&wwn->wwn_group.cg_item), host_no);
}
/* Start items for tcm_loop_cit */
......
......@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
-if (!l_port)
-return PYX_TRANSPORT_LU_COMM_FAILURE;
+if (!l_port) {
+cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+return -EINVAL;
+}
buf = transport_kmap_first_data_page(cmd);
/*
......@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
-rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+rc = -EINVAL;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
......@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
-rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+rc = -EINVAL;
goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
......@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!rc) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
-rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+rc = -EINVAL;
goto out;
}
......@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
-rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+rc = -EINVAL;
goto out;
}
rc = -1;
......@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
if (rc != 0) {
-rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+rc = -EINVAL;
goto out;
}
} else {
......@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* INVALID_PARAMETER_LIST
*/
if (rc != 0) {
-rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+rc = -EINVAL;
goto out;
}
}
......@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* struct t10_alua_lu_gp.
*/
spin_lock(&lu_gps_lock);
-atomic_set(&lu_gp->lu_gp_shutdown, 1);
list_del(&lu_gp->lu_gp_node);
alua_lu_gps_count--;
spin_unlock(&lu_gps_lock);
......@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
tg_pt_gp_mem->tg_pt = port;
port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
-atomic_set(&port->sep_tg_pt_gp_active, 1);
return tg_pt_gp_mem;
}
......
......@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
if (cmd->data_length < 60)
return 0;
-buf[2] = 0x3c;
+buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
......@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
if (cmd->data_length < 4) {
pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
+cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
......@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
}
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
ret = -EINVAL;
out_unmap:
......@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
default:
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
cdb[2] & 0x3f, cdb[3]);
-return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+return -EINVAL;
}
offset += length;
......@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
if (cdb[1] & 0x01) {
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
-return PYX_TRANSPORT_INVALID_CDB_FIELD;
+cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+return -ENOSYS;
}
buf = transport_kmap_first_data_page(cmd);
......@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
-return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+return -ENOSYS;
}
/* First UNMAP block descriptor starts at 8 byte offset */
......@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME emulation not supported"
" for: %s\n", dev->transport->name);
-return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+return -ENOSYS;
}
if (cmd->t_task_cdb[0] == WRITE_SAME)
......@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
int target_emulate_synchronize_cache(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
+struct se_cmd *cmd = task->task_se_cmd;
if (!dev->transport->do_sync_cache) {
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
-return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+return -ENOSYS;
}
dev->transport->do_sync_cache(task);
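The string of PYX_TRANSPORT_* conversions above all follow one pattern: the emulation handler records a TCM_* sense reason on the command and returns an ordinary negative errno, leaving the core to build the CHECK CONDITION. A condensed sketch of a handler in the new style, modelled on the SYNCHRONIZE CACHE hunk just above (illustrative, not the exact upstream function):

	static int emulate_example_op(struct se_task *task)
	{
		struct se_cmd *cmd = task->task_se_cmd;
		struct se_device *dev = cmd->se_dev;

		if (!dev->transport->do_sync_cache) {
			/* New style: sense reason on the command, plain errno back. */
			cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
			return -ENOSYS;
		}

		dev->transport->do_sync_cache(task);
		return 0;
	}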
......
......@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
......@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
" struct se_subsystem_dev\n");
goto unlock;
}
-INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
......@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
" from allocate_virtdevice()\n");
goto out;
}
-spin_lock(&se_device_lock);
-list_add_tail(&se_dev->se_dev_node, &se_dev_list);
-spin_unlock(&se_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
......@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
-spin_lock(&se_device_lock);
-list_del(&se_dev->se_dev_node);
-spin_unlock(&se_device_lock);
dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
......
......@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
-se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
......@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
-se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
/*
......@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
-se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
......@@ -708,7 +705,7 @@ done:
se_task->task_scsi_status = GOOD;
transport_complete_task(se_task, 1);
-return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+return 0;
}
/* se_release_device_for_hba():
......@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
return -EINVAL;
}
pr_err("dpo_emulated not supported\n");
return -EINVAL;
if (flag) {
pr_err("dpo_emulated not supported\n");
return -EINVAL;
}
return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
......@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
return -EINVAL;
}
-if (dev->transport->fua_write_emulated == 0) {
+if (flag && dev->transport->fua_write_emulated == 0) {
pr_err("fua_write_emulated not supported\n");
return -EINVAL;
}
......@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
return -EINVAL;
}
pr_err("ua read emulated not supported\n");
return -EINVAL;
if (flag) {
pr_err("ua read emulated not supported\n");
return -EINVAL;
}
return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
......@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
-if (dev->transport->write_cache_emulated == 0) {
+if (flag && dev->transport->write_cache_emulated == 0) {
pr_err("write_cache_emulated not supported\n");
return -EINVAL;
}
......@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
-if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
......@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
-if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
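The attribute hunks above implement the "Don't return an error if disabling unsupported features" entry: writing 0 to an unsupported emulation attribute now succeeds, and only attempts to enable it are rejected. A sketch of the resulting store pattern, modelled on se_dev_set_emulate_write_cache() (hypothetical attribute name):

	static int se_dev_set_emulate_example(struct se_device *dev, int flag)
	{
		if (flag != 0 && flag != 1) {
			pr_err("Illegal value %d\n", flag);
			return -EINVAL;
		}
		/* Disabling (flag == 0) always succeeds; only enabling can fail. */
		if (flag && dev->transport->write_cache_emulated == 0) {
			pr_err("write_cache_emulated not supported\n");
			return -EINVAL;
		}
		dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
		return 0;
	}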
......@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
ret = -ENOMEM;
goto out;
}
-INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
......
......@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
return -ENOMEM;
}
-for (i = 0; i < task->task_sg_nents; i++) {
-iov[i].iov_len = sg[i].length;
-iov[i].iov_base = sg_virt(&sg[i]);
+for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+iov[i].iov_len = sg->length;
+iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
......@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
return -ENOMEM;
}
-for (i = 0; i < task->task_sg_nents; i++) {
-iov[i].iov_len = sg[i].length;
-iov[i].iov_base = sg_virt(&sg[i]);
+for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+iov[i].iov_len = sg->length;
+iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
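Indexing a scatterlist as a plain array (sg[i]) is only safe while the list is a single flat allocation; once sg table chaining is involved the entries are no longer contiguous, which is what the "walk properly over sg list" fix addresses by switching to for_each_sg(). A small sketch of the iterator (hypothetical helper):

	#include <linux/scatterlist.h>
	#include <linux/uio.h>

	/* Hypothetical helper: build an iovec array from a (possibly chained) sg list. */
	static void sg_to_iovec(struct scatterlist *sgl, unsigned int nents,
				struct iovec *iov)
	{
		struct scatterlist *sg;
		unsigned int i;

		for_each_sg(sgl, sg, nents, i) {
			iov[i].iov_len = sg->length;
			iov[i].iov_base = sg_virt(sg);	/* entry must be kernel-mapped */
		}
	}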
......@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
if (ret > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-cmd->t_tasks_fua) {
+(cmd->se_cmd_flags & SCF_FUA)) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
......@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
}
-if (ret < 0)
+if (ret < 0) {
+cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
+}
if (ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
-return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+return 0;
}