Commit dc843ef0 authored by James Bottomley

Merge remote-tracking branch 'scsi-queue/core-for-3.19' into for-linus

parents 009d0431 249b15ba
......@@ -827,10 +827,6 @@ but in the event of any barrier requests in the tag queue we need to ensure
that requests are restarted in the order they were queued. This may happen
if the driver needs to use blk_queue_invalidate_tags().
Tagging also defines a new request flag, REQ_QUEUED. This is set whenever
a request is currently tagged. You should not use this flag directly,
blk_rq_tagged(rq) is the portable way to do so.
3.3 I/O Submission
The routine submit_bio() is used to submit a single io. Higher level i/o
......
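For orientation (not part of the commit): with blk_rq_tagged() removed, block-layer code now tests the flag directly, as the blk-core.c hunks further down do. A caller that still wants a named predicate could open-code something like the hypothetical helper below; rq_is_tagged() is an illustrative name only.
#include <linux/blkdev.h>
/* Hypothetical stand-in for the removed blk_rq_tagged(rq): REQ_QUEUED
 * is set while a request holds a legacy block-layer tag. */
static inline bool rq_is_tagged(const struct request *rq)
{
	return rq->cmd_flags & REQ_QUEUED;
}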
......@@ -271,9 +271,9 @@ init_this_scsi_driver() ----+
slave_destroy() ***
------------------------------------------------------------
The mid level invokes scsi_adjust_queue_depth() with tagged queuing off and
"cmd_per_lun" for that host as the queue length. These settings can be
overridden by a slave_configure() supplied by the LLD.
The mid level invokes scsi_adjust_queue_depth() with "cmd_per_lun" for that
host as the queue length. These settings can be overridden by a
slave_configure() supplied by the LLD.
*** For scsi devices that the mid level tries to scan but do not
respond, a slave_alloc(), slave_destroy() pair is called.
......@@ -366,13 +366,11 @@ is initialized. The functions below are listed alphabetically and their
names all start with "scsi_".
Summary:
scsi_activate_tcq - turn on tag command queueing
scsi_add_device - creates new scsi device (lu) instance
scsi_add_host - perform sysfs registration and set up transport class
scsi_adjust_queue_depth - change the queue depth on a SCSI device
scsi_bios_ptable - return copy of block device's partition table
scsi_block_requests - prevent further commands being queued to given host
scsi_deactivate_tcq - turn off tag command queueing
scsi_host_alloc - return a new scsi_host instance whose refcount==1
scsi_host_get - increments Scsi_Host instance's refcount
scsi_host_put - decrements Scsi_Host instance's refcount (free if 0)
......@@ -389,24 +387,6 @@ Summary:
Details:
/**
* scsi_activate_tcq - turn on tag command queueing ("ordered" task attribute)
* @sdev: device to turn on TCQ for
* @depth: queue depth
*
* Returns nothing
*
* Might block: no
*
* Notes: Eventually, it is hoped depth would be the maximum depth
* the device could cope with and the real queue depth
* would be adjustable from 0 to depth.
*
* Defined (inline) in: include/scsi/scsi_tcq.h
**/
void scsi_activate_tcq(struct scsi_device *sdev, int depth)
/**
* scsi_add_device - creates new scsi device (lu) instance
* @shost: pointer to scsi host instance
......@@ -458,9 +438,6 @@ int scsi_add_host(struct Scsi_Host *shost, struct device * dev)
/**
* scsi_adjust_queue_depth - allow LLD to change queue depth on a SCSI device
* @sdev: pointer to SCSI device to change queue depth on
* @tagged: 0 - no tagged queuing
* MSG_SIMPLE_TAG - simple tagged queuing
* MSG_ORDERED_TAG - ordered tagged queuing
* @tags Number of tags allowed if tagged queuing enabled,
* or number of commands the LLD can queue up
* in non-tagged mode (as per cmd_per_lun).
......@@ -471,15 +448,12 @@ int scsi_add_host(struct Scsi_Host *shost, struct device * dev)
*
* Notes: Can be invoked any time on a SCSI device controlled by this
* LLD. [Specifically during and after slave_configure() and prior to
* slave_destroy().] Can safely be invoked from interrupt code. Actual
* queue depth change may be delayed until the next command is being
* processed. See also scsi_activate_tcq() and scsi_deactivate_tcq().
* slave_destroy().] Can safely be invoked from interrupt code.
*
* Defined in: drivers/scsi/scsi.c [see source code for more notes]
*
**/
void scsi_adjust_queue_depth(struct scsi_device * sdev, int tagged,
int tags)
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tags)
/**
......@@ -514,20 +488,6 @@ unsigned char *scsi_bios_ptable(struct block_device *dev)
void scsi_block_requests(struct Scsi_Host * shost)
/**
* scsi_deactivate_tcq - turn off tag command queueing
* @sdev: device to turn off TCQ for
* @depth: queue depth (stored in sdev)
*
* Returns nothing
*
* Might block: no
*
* Defined (inline) in: include/scsi/scsi_tcq.h
**/
void scsi_deactivate_tcq(struct scsi_device *sdev, int depth)
/**
* scsi_host_alloc - create a scsi host adapter instance and perform basic
* initialization.
......
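To make the new calling convention concrete, here is a minimal sketch of an LLD ->slave_configure() against the revised interface; mydrv_slave_configure and MYDRV_MAX_TAGS are hypothetical names, not part of this commit.
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#define MYDRV_MAX_TAGS	64	/* hypothetical per-LU queue depth */
/* The tag-type argument is gone; only the desired depth is passed. */
static int mydrv_slave_configure(struct scsi_device *sdev)
{
	if (sdev->tagged_supported)
		scsi_adjust_queue_depth(sdev, MYDRV_MAX_TAGS);
	else
		scsi_adjust_queue_depth(sdev, sdev->host->cmd_per_lun);
	return 0;
}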
......@@ -506,9 +506,11 @@ user does not request data that far.)
DEBUGGING HINTS
To enable debugging messages, edit st.c and #define DEBUG 1. As seen
above, debugging can be switched off with an ioctl if debugging is
compiled into the driver. The debugging output is not voluminous.
Debugging code is now compiled in by default, but debugging is turned off
by the kernel module parameter debug_flag, which defaults to 0. Debugging
can still be switched on and off with an ioctl. To enable debugging at
module load time, add debug_flag=1 to the module load options; the
debugging output is not voluminous.
If the tape seems to hang, I would be very interested to hear where
the driver is waiting. With the command 'ps -l' you can see the state
......
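As a side note, enabling this at load time is just "modprobe st debug_flag=1"; inside the driver the parameter is declared along the lines of the sketch below (the exact permission bits and description string in st.c may differ).
#include <linux/module.h>
/* Sketch of the module parameter wiring assumed by the text above. */
static int debug_flag;
module_param(debug_flag, int, 0);
MODULE_PARM_DESC(debug_flag, "Enable debugging at module load time (default 0)");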
......@@ -1266,7 +1266,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
if (blk_rq_tagged(rq))
if (rq->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(q, rq);
BUG_ON(blk_queued_rq(rq));
......@@ -2554,7 +2554,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
*/
void blk_finish_request(struct request *req, int error)
{
if (blk_rq_tagged(req))
if (req->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(req->q, req);
BUG_ON(blk_queued_rq(req));
......
......@@ -584,6 +584,34 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
return 0;
}
/**
* blk_mq_unique_tag() - return a tag that is unique queue-wide
* @rq: request for which to compute a unique tag
*
* The tag field in struct request is unique per hardware queue but not over
all hardware queues. Hence this function, which returns a tag with the
hardware context index in the upper bits and the per-hardware-queue tag in
the lower bits.
*
* Note: When called for a request that is queued on a non-multiqueue request
* queue, the hardware context index is set to zero.
*/
u32 blk_mq_unique_tag(struct request *rq)
{
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;
int hwq = 0;
if (q->mq_ops) {
hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
hwq = hctx->queue_num;
}
return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
......
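The decode side of this encoding is not visible in the hunk above; as far as I recall it lands in include/linux/blk-mq.h in the same series, roughly as the two inline helpers below (BLK_MQ_UNIQUE_TAG_BITS is 16 there, with BLK_MQ_UNIQUE_TAG_MASK covering the low 16 bits).
/* Recover the hardware queue index from a queue-wide unique tag. */
static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}
/* Recover the per-hardware-queue tag from a queue-wide unique tag. */
static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}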
......@@ -2049,6 +2049,8 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
*/
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
if (!set->nr_hw_queues)
return -EINVAL;
if (!set->queue_depth)
......
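The BUILD_BUG_ON added above ties the tag-set depth limit to the unique-tag encoding. On my reading of include/linux/blk-mq.h for this series (BLK_MQ_MAX_DEPTH = 10240, BLK_MQ_UNIQUE_TAG_BITS = 16), every per-hardware-queue tag fits in the low 16 bits of the unique tag, so the hardware queue index in the upper bits can never be clobbered; the sketch below just restates that compile-time check in isolation.
#include <linux/blk-mq.h>
#include <linux/bug.h>
/* Restated invariant: the maximum per-hw-queue depth must fit within
 * the bits reserved for the tag part of blk_mq_unique_tag(). */
static inline void blk_mq_unique_tag_fits(void)
{
	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > (1 << BLK_MQ_UNIQUE_TAG_BITS));
}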
......@@ -142,7 +142,7 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(GPCMD_VERIFY_10, filter->read_ok);
__set_bit(VERIFY_16, filter->read_ok);
__set_bit(REPORT_LUNS, filter->read_ok);
__set_bit(SERVICE_ACTION_IN, filter->read_ok);
__set_bit(SERVICE_ACTION_IN_16, filter->read_ok);
__set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
__set_bit(MAINTENANCE_IN, filter->read_ok);
__set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);
......
......@@ -1164,7 +1164,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
depth = min(ATA_MAX_QUEUE - 1, depth);
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
scsi_adjust_queue_depth(sdev, depth);
}
blk_queue_flush_queueable(q, false);
......@@ -1282,7 +1282,7 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
if (sdev->queue_depth == queue_depth)
return -EINVAL;
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
scsi_adjust_queue_depth(sdev, queue_depth);
return queue_depth;
}
......@@ -3570,7 +3570,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
break;
case SERVICE_ACTION_IN:
case SERVICE_ACTION_IN_16:
if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
else
......
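The rename from SERVICE_ACTION_IN to SERVICE_ACTION_IN_16 makes explicit that opcode 0x9e is the 16-byte SERVICE ACTION IN CDB; READ CAPACITY(16) is that opcode with service action 0x10 in the low five bits of byte 1, which is what the check above and the IS_READ_CAP_16() macro below test. A standalone restatement, for reference only:
#include <linux/types.h>
#include <scsi/scsi.h>
/* True if the CDB is READ CAPACITY(16): SERVICE ACTION IN(16) opcode
 * (0x9e) with service action SAI_READ_CAPACITY_16 (0x10). */
static bool cdb_is_read_cap_16(const unsigned char *cdb)
{
	return cdb[0] == SERVICE_ACTION_IN_16 &&
	       (cdb[1] & 0x1f) == SAI_READ_CAPACITY_16;
}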
......@@ -329,7 +329,7 @@ INQUIRY_EVPD_BIT_MASK) ? 1 : 0)
(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET))
#define IS_READ_CAP_16(cdb) \
((cdb[0] == SERVICE_ACTION_IN && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)
((cdb[0] == SERVICE_ACTION_IN_16 && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)
/* Request Sense Helper Macros */
#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \
......@@ -2947,7 +2947,7 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
case READ_CAPACITY:
retcode = nvme_trans_read_capacity(ns, hdr, cmd);
break;
case SERVICE_ACTION_IN:
case SERVICE_ACTION_IN_16:
if (IS_READ_CAP_16(cmd))
retcode = nvme_trans_read_capacity(ns, hdr, cmd);
else
......
......@@ -2258,28 +2258,6 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return 0;
}
/**
* srp_change_queue_type - changing device queue tag type
* @sdev: scsi device struct
* @tag_type: requested tag type
*
* Returns queue tag type.
*/
static int
srp_change_queue_type(struct scsi_device *sdev, int tag_type)
{
if (sdev->tagged_supported) {
scsi_set_tag_type(sdev, tag_type);
if (tag_type)
scsi_activate_tcq(sdev, sdev->queue_depth);
else
scsi_deactivate_tcq(sdev, sdev->queue_depth);
} else
tag_type = 0;
return tag_type;
}
/**
* srp_change_queue_depth - setting device queue depth
* @sdev: scsi device struct
......@@ -2300,7 +2278,7 @@ srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
max_depth = 1;
if (qdepth > max_depth)
qdepth = max_depth;
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
scsi_adjust_queue_depth(sdev, qdepth);
} else if (reason == SCSI_QDEPTH_QFULL)
scsi_track_queue_full(sdev, qdepth);
else
......@@ -2600,7 +2578,7 @@ static struct scsi_host_template srp_template = {
.info = srp_target_info,
.queuecommand = srp_queuecommand,
.change_queue_depth = srp_change_queue_depth,
.change_queue_type = srp_change_queue_type,
.change_queue_type = scsi_change_queue_type,
.eh_abort_handler = srp_abort,
.eh_device_reset_handler = srp_reset_device,
.eh_host_reset_handler = srp_reset_host,
......
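With the driver-private srp_change_queue_type() removed, the host template now points at the midlayer's scsi_change_queue_type(). That helper is not reproduced in this diff, but in this series it amounts to roughly the sketch below: with scsi_activate_tcq()/scsi_deactivate_tcq() gone, only the tag type itself needs recording (see drivers/scsi/scsi.c for the authoritative version).
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
/* Rough sketch of the generic helper referenced by the template above. */
int scsi_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (!sdev->tagged_supported)
		return 0;

	scsi_set_tag_type(sdev, tag_type);
	return tag_type;
}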
......@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
.use_blk_tags = 1,
};
static int mptsas_get_linkerrors(struct sas_phy *phy)
......
......@@ -2322,7 +2322,6 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
VirtTarget *vtarget;
struct scsi_target *starget;
int max_depth;
int tagged;
MPT_ADAPTER *ioc = hd->ioc;
starget = scsi_target(sdev);
......@@ -2347,12 +2346,8 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
if (qdepth > max_depth)
qdepth = max_depth;
if (qdepth == 1)
tagged = 0;
else
tagged = MSG_SIMPLE_TAG;
scsi_adjust_queue_depth(sdev, tagged, qdepth);
scsi_adjust_queue_depth(sdev, qdepth);
return sdev->queue_depth;
}
......@@ -2400,9 +2395,8 @@ mptscsih_slave_configure(struct scsi_device *sdev)
mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH,
SCSI_QDEPTH_DEFAULT);
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"tagged %d, simple %d, ordered %d\n",
ioc->name,sdev->tagged_supported, sdev->simple_tags,
sdev->ordered_tags));
"tagged %d, simple %d\n",
ioc->name,sdev->tagged_supported, sdev->simple_tags));
blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
......
......@@ -187,6 +187,7 @@ void enclosure_unregister(struct enclosure_device *edev)
EXPORT_SYMBOL_GPL(enclosure_unregister);
#define ENCLOSURE_NAME_SIZE 64
#define COMPONENT_NAME_SIZE 64
static void enclosure_link_name(struct enclosure_component *cdev, char *name)
{
......@@ -246,6 +247,29 @@ static void enclosure_component_release(struct device *dev)
put_device(dev->parent);
}
static struct enclosure_component *
enclosure_component_find_by_name(struct enclosure_device *edev,
const char *name)
{
int i;
const char *cname;
struct enclosure_component *ecomp;
if (!edev || !name || !name[0])
return NULL;
for (i = 0; i < edev->components; i++) {
ecomp = &edev->component[i];
cname = dev_name(&ecomp->cdev);
if (ecomp->number != -1 &&
cname && cname[0] &&
!strcmp(cname, name))
return ecomp;
}
return NULL;
}
static const struct attribute_group *enclosure_component_groups[];
/**
......@@ -269,7 +293,8 @@ enclosure_component_register(struct enclosure_device *edev,
{
struct enclosure_component *ecomp;
struct device *cdev;
int err;
int err, i;
char newname[COMPONENT_NAME_SIZE];
if (number >= edev->components)
return ERR_PTR(-EINVAL);
......@@ -283,9 +308,20 @@ enclosure_component_register(struct enclosure_device *edev,
ecomp->number = number;
cdev = &ecomp->cdev;
cdev->parent = get_device(&edev->edev);
if (name && name[0])
dev_set_name(cdev, "%s", name);
else
if (name && name[0]) {
/* Some hardware (e.g. enclosure in RX300 S6) has components
* with non unique names. Registering duplicates in sysfs
* will lead to warnings during bootup. So make the names
* unique by appending consecutive numbers -1, -2, ... */
i = 1;
snprintf(newname, COMPONENT_NAME_SIZE,
"%s", name);
while (enclosure_component_find_by_name(edev, newname))
snprintf(newname, COMPONENT_NAME_SIZE,
"%s-%i", name, i++);
dev_set_name(cdev, "%s", newname);
} else
dev_set_name(cdev, "%u", number);
cdev->release = enclosure_component_release;
......
......@@ -212,8 +212,6 @@ static inline
void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
u8 tm_flags)
{
char tag[2];
int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
if (unlikely(tm_flags)) {
......@@ -221,17 +219,7 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
return;
}
if (scsi_populate_tag_msg(scsi, tag)) {
switch (tag[0]) {
case MSG_ORDERED_TAG:
fcp->fc_pri_ta |= FCP_PTA_ORDERED;
break;
case MSG_SIMPLE_TAG:
fcp->fc_pri_ta |= FCP_PTA_SIMPLE;
break;
};
} else
fcp->fc_pri_ta = FCP_PTA_SIMPLE;
fcp->fc_pri_ta = FCP_PTA_SIMPLE;
if (scsi->sc_data_direction == DMA_FROM_DEVICE)
fcp->fc_flags |= FCP_CFL_RDDATA;
......
......@@ -37,13 +37,13 @@ static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
{
switch (reason) {
case SCSI_QDEPTH_DEFAULT:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
scsi_adjust_queue_depth(sdev, depth);
break;
case SCSI_QDEPTH_QFULL:
scsi_track_queue_full(sdev, depth);
break;
case SCSI_QDEPTH_RAMP_UP:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
scsi_adjust_queue_depth(sdev, depth);
break;
default:
return -EOPNOTSUPP;
......@@ -66,9 +66,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
{
if (sdp->tagged_supported)
scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
else
scsi_adjust_queue_depth(sdp, 0, 1);
scsi_adjust_queue_depth(sdp, default_depth);
return 0;
}
......
......@@ -198,7 +198,7 @@ static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
if (queue_depth > TW_Q_LENGTH-2)
queue_depth = TW_Q_LENGTH-2;
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
scsi_adjust_queue_depth(sdev, queue_depth);
return queue_depth;
} /* End twa_change_queue_depth() */
......
......@@ -200,7 +200,7 @@ static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth,
if (queue_depth > TW_Q_LENGTH-2)
queue_depth = TW_Q_LENGTH-2;
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
scsi_adjust_queue_depth(sdev, queue_depth);
return queue_depth;
} /* End twl_change_queue_depth() */
......
......@@ -532,7 +532,7 @@ static int tw_change_queue_depth(struct scsi_device *sdev, int queue_depth,
if (queue_depth > TW_Q_LENGTH-2)
queue_depth = TW_Q_LENGTH-2;
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
scsi_adjust_queue_depth(sdev, queue_depth);
return queue_depth;
} /* End tw_change_queue_depth() */
......
......@@ -327,6 +327,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
tpnt->slave_alloc = NCR_700_slave_alloc;
tpnt->change_queue_depth = NCR_700_change_queue_depth;
tpnt->change_queue_type = NCR_700_change_queue_type;
tpnt->use_blk_tags = 1;
if(tpnt->name == NULL)
tpnt->name = "53c700";
......@@ -592,19 +593,14 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
hostdata->cmd = NULL;
if(SCp != NULL) {
struct NCR_700_command_slot *slot =
(struct NCR_700_command_slot *)SCp->host_scribble;
dma_unmap_single(hostdata->dev, slot->pCmd,
MAX_COMMAND_SIZE, DMA_TO_DEVICE);
if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
#ifdef NCR_700_DEBUG
printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
SCp, SCp->cmnd[7], result);
scsi_print_sense("53c700", SCp);
#endif
dma_unmap_single(hostdata->dev, slot->dma_handle,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
/* restore the old result if the request sense was
......@@ -906,8 +902,10 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
/* we're done negotiating */
NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
SCp->device->tagged_supported = 0;
scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
scsi_adjust_queue_depth(SCp->device, host->cmd_per_lun);
scsi_set_tag_type(SCp->device, 0);
} else {
shost_printk(KERN_WARNING, host,
"(%d:%d) Unexpected REJECT Message %s\n",
......@@ -1432,7 +1430,7 @@ NCR_700_start_command(struct scsi_cmnd *SCp)
if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
&& (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
slot->flags != NCR_700_FLAG_AUTOSENSE)) {
count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
count += spi_populate_tag_msg(&hostdata->msgout[count], SCp);
}
if(hostdata->fast &&
......@@ -1772,7 +1770,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
*/
if(NCR_700_get_depth(SCp->device) != 0
&& (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
|| !blk_rq_tagged(SCp->request))) {
|| !(SCp->flags & SCMD_TAGGED))) {
CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
NCR_700_get_depth(SCp->device));
return SCSI_MLQUEUE_DEVICE_BUSY;
......@@ -1800,7 +1798,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
printk("53c700: scsi%d, command ", SCp->device->host->host_no);
scsi_print_command(SCp);
#endif
if(blk_rq_tagged(SCp->request)
if ((SCp->flags & SCMD_TAGGED)
&& (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
&& NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
......@@ -1814,7 +1812,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
*
* FIXME: This will royally screw up on multiple LUN devices
* */
if(!blk_rq_tagged(SCp->request)
if (!(SCp->flags & SCMD_TAGGED)
&& (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
......@@ -1911,9 +1909,7 @@ NCR_700_abort(struct scsi_cmnd * SCp)
{
struct NCR_700_command_slot *slot;
scmd_printk(KERN_INFO, SCp,
"New error handler wants to abort command\n\t");
scsi_print_command(SCp);
scmd_printk(KERN_INFO, SCp, "abort command\n");
slot = (struct NCR_700_command_slot *)SCp->host_scribble;
......@@ -2056,13 +2052,10 @@ NCR_700_slave_configure(struct scsi_device *SDp)
/* to do here: allocate memory; build a queue_full list */
if(SDp->tagged_supported) {
scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
scsi_adjust_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
} else {
/* initialise to default depth */
scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
}
if(hostdata->fast) {
/* Find the correct offset and period via domain validation */
if (!spi_initial_dv(SDp->sdev_target))
......@@ -2090,7 +2083,7 @@ NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason)
if (depth > NCR_700_MAX_TAGS)
depth = NCR_700_MAX_TAGS;
scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
scsi_adjust_queue_depth(SDp, depth);
return depth;
}
......@@ -2101,8 +2094,6 @@ static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
scsi_set_tag_type(SDp, tag_type);
/* We have a global (per target) flag to track whether TCQ is
* enabled, so we'll be turning it off for the entire target here.
* our tag algorithm will fail if we mix tagged and untagged commands,
......@@ -2110,15 +2101,16 @@ static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
if (change_tag)
scsi_target_quiesce(SDp->sdev_target);
scsi_set_tag_type(SDp, tag_type);
if (!tag_type) {
/* shift back to the default unqueued number of commands
* (the user can still raise this) */
scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
scsi_adjust_queue_depth(SDp, SDp->host->cmd_per_lun);
hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
} else {
/* Here, we cleared the negotiation flag above, so this
* will force the driver to renegotiate */
scsi_activate_tcq(SDp, SDp->queue_depth);
scsi_adjust_queue_depth(SDp, SDp->queue_depth);
if (change_tag)
NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
}
......
......@@ -2327,12 +2327,12 @@ static int blogic_slaveconfig(struct scsi_device *dev)
if (qdepth == 0)
qdepth = BLOGIC_MAX_AUTO_TAG_DEPTH;
adapter->qdepth[tgt_id] = qdepth;
scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, qdepth);
scsi_adjust_queue_depth(dev, qdepth);
} else {
adapter->tagq_ok &= ~(1 << tgt_id);
qdepth = adapter->untag_qdepth;
adapter->qdepth[tgt_id] = qdepth;
scsi_adjust_queue_depth(dev, 0, qdepth);
scsi_adjust_queue_depth(dev, qdepth);
}
qdepth = 0;
for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
......
......@@ -2647,14 +2647,14 @@ static void NCR5380_dma_complete(NCR5380_instance * instance) {
*
* Purpose : abort a command
*
* Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
* host byte of the result field to, if zero DID_ABORTED is
* used.
*
* Returns : 0 - success, -1 on failure.
* Returns : SUCCESS - success, FAILED on failure.
*
* XXX - there is no way to abort the command that is currently
* connected, you have to wait for it to complete. If this is
* a problem, we could implement longjmp() / setjmp(), setjmp()
* called where the loop started in NCR5380_main().
*
......@@ -2666,9 +2666,8 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
struct Scsi_Host *instance = cmd->device->host;
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
Scsi_Cmnd *tmp, **prev;
printk(KERN_WARNING "scsi%d : aborting command\n", instance->host_no);
scsi_print_command(cmd);
scmd_printk(KERN_WARNING, cmd, "aborting command\n");
NCR5380_print_status(instance);
......@@ -2704,7 +2703,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
* aborted flag and get back into our main loop.
*/
return 0;
return SUCCESS;
}
#endif
......