/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"
#include "blk-mq.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);
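/*
 * The attributes can be configured at boot, e.g.
 * "fail_io_timeout=<interval>,<probability>,<space>,<times>" (the
 * common fault-injection format), or later via debugfs (see below).
 */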

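/*
 * Decide whether to ignore a completion on this queue so that the
 * request appears to time out.  Only queues with QUEUE_FLAG_FAIL_IO
 * set participate in the fault injection.
 */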
int blk_should_fake_timeout(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return 0;

	return should_fail(&fail_io_timeout, 1);
}

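/*
 * Expose the fault attributes under debugfs ("fail_io_timeout") so the
 * injection parameters can be tuned at run time.
 */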
static int __init fail_io_timeout_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
						NULL, &fail_io_timeout);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_io_timeout_debugfs);

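/*
 * sysfs show/store handlers for the per-disk fault-injection flag,
 * backed by QUEUE_FLAG_FAIL_IO on the disk's request queue.
 */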
ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

	return sprintf(buf, "%d\n", set != 0);
}

ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		val = simple_strtoul(p, &p, 10);
		spin_lock_irq(q->queue_lock);
		if (val)
			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
		spin_unlock_irq(q->queue_lock);
	}

	return count;
}

#endif /* CONFIG_FAIL_IO_TIMEOUT */

/*
 * blk_delete_timer - Delete/cancel timer for a given request.
 * @req:	request that we are canceling timer for
 */
void blk_delete_timer(struct request *req)
{
	list_del_init(&req->timeout_list);
}

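/*
 * A request's deadline has passed: ask the driver's timeout handler
 * (if any) for a verdict and act on it.
 */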
static void blk_rq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	if (q->rq_timed_out_fn)
		ret = q->rq_timed_out_fn(req);
	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
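		/*
		 * Give the request a fresh deadline, then clear the
		 * complete mark so it can be finished (or time out)
		 * again.
		 */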
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		/*
		 * The LLD handles this for now, but in the future we
		 * could send an abort message for the command and move
		 * more of the generic SCSI EH code into the block layer.
		 */
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

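/*
 * Check one request on the timeout list: if its deadline has passed and
 * it has not already completed, run the timeout handling; otherwise
 * track the earliest remaining deadline for the caller.
 */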
static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
			  unsigned int *next_set)
{
	if (time_after_eq(jiffies, rq->deadline)) {
		list_del_init(&rq->timeout_list);

		/*
		 * Check if we raced with end io completion
		 */
		if (!blk_mark_rq_complete(rq))
			blk_rq_timed_out(rq);
	} else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
		*next_timeout = rq->deadline;
		*next_set = 1;
	}
}

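/*
 * Work handler driven by the queue's timeout timer: scan the timeout
 * list under the queue lock and expire requests whose deadline has
 * passed.
 */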
void blk_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long flags, next = 0;
	struct request *rq, *tmp;
	int next_set = 0;

	spin_lock_irqsave(q->queue_lock, flags);

	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
		blk_rq_check_expired(rq, &next, &next_set);

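	/* Re-arm the timer for the earliest deadline still outstanding. */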
	if (next_set)
		mod_timer(&q->timeout, round_jiffies_up(next));

	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * blk_abort_request - Request recovery for the specified request
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the queue's timeout function.
 * LLDDs that implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request. Must hold the queue lock.
 */
void blk_abort_request(struct request *req)
{
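	/*
	 * All timeout handling runs with the request marked complete, so
	 * bail out if another context (completion or timeout) got there
	 * first.
	 */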
	if (blk_mark_rq_complete(req))
		return;

	if (req->q->mq_ops) {
		blk_mq_rq_timed_out(req, false);
	} else {
		blk_delete_timer(req);
		blk_rq_timed_out(req);
	}
}
EXPORT_SYMBOL_GPL(blk_abort_request);

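/*
 * blk_rq_timeout - clamp an expiry time
 * @timeout:	requested expiry, in absolute jiffies
 *
 * Cap the expiry at BLK_MAX_TIMEOUT from now so the queue timer always
 * fires reasonably soon and pending requests are rechecked.
 */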
unsigned long blk_rq_timeout(unsigned long timeout)
{
	unsigned long maxt;

	maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
	if (time_after(timeout, maxt))
		timeout = maxt;

	return timeout;
}

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:	request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	if (!q->mq_ops)
		lockdep_assert_held(q->queue_lock);

	/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
	if (!q->mq_ops && !q->rq_timed_out_fn)
		return;

	BUG_ON(!list_empty(&req->timeout_list));

	/*
	 * Some LLDs, like scsi, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

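	/*
	 * ->deadline may be read without the queue lock by the blk-mq
	 * timeout path, so keep the store tear-free with WRITE_ONCE().
	 */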
	WRITE_ONCE(req->deadline, jiffies + req->timeout);

	/*
	 * Only the non-mq case needs to add the request to a protected list.
	 * For the mq case we simply scan the tag map.
	 */
	if (!q->mq_ops)
		list_add_tail(&req->timeout_list, &req->q->timeout_list);

	/*
	 * If the timer isn't already pending, or if this timeout is
	 * earlier than an existing one, modify the timer.  Round up to
	 * the next second.
	 */
	expiry = blk_rq_timeout(round_jiffies_up(req->deadline));

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires)) {
		unsigned long diff = q->timeout.expires - expiry;

		/*
		 * Because of the slack added to group timers, the pending
		 * expiry is often a little past what we asked for.  Apply
		 * the same tolerance here, otherwise we would keep
		 * modifying the timer because the stored expiry for a
		 * requested value X ends up being X + something.
		 */
		if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
			mod_timer(&q->timeout, expiry);
	}
}