/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

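/*
 * iio_buffer_ready() - check whether a read of @to_wait samples can proceed
 * @indio_dev:	the IIO device the buffer belongs to
 * @buf:	the buffer to check
 * @to_wait:	minimum number of samples that must be available
 * @to_flush:	number of samples to flush from a hardware FIFO if needed
 *
 * Returns true if at least @to_wait samples are available, flushing the
 * hardware FIFO into the buffer when that is required to reach the
 * threshold. Always returns true once the device has been unregistered,
 * and relaxes the threshold for an inactive buffer so that any remaining
 * data can be drained.
 */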
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: number of bytes read on success, 0 to signal end of file, or a
 *	   negative error code.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);
	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (POLLIN | POLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info || rb == NULL)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return POLLIN | POLLRDNORM;
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);

/**
 * iio_buffer_set_attrs - Set buffer specific attributes
 * @buffer: The buffer for which we are setting attributes
 * @attrs: Pointer to a null terminated list of pointers to attributes
 */
void iio_buffer_set_attrs(struct iio_buffer *buffer,
			 const struct attribute **attrs)
{
	buffer->attrs = attrs;
}
EXPORT_SYMBOL_GPL(iio_buffer_set_attrs);
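
/*
 * Example (illustrative only; the attribute names below are hypothetical
 * and not defined in this file): a driver can attach extra buffer sysfs
 * attributes before registering the device:
 *
 *	static const struct attribute *my_buffer_attrs[] = {
 *		&dev_attr_my_fifo_threshold.attr,
 *		NULL,
 *	};
 *
 *	iio_buffer_set_attrs(indio_dev->buffer, my_buffer_attrs);
 */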

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.repeat,
		       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* Note: NULL is used as the error indicator, as it is never a valid match. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					  unsigned int masklength,
					  const unsigned long *mask,
					  bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffers request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
				  sizeof(*trialmask),
				  GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static int iio_scan_mask_query(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
};

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;

}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}

static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	return iio_storage_bytes_for_si(indio_dev,
					indio_dev->scan_index_timestamp);
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				const unsigned long *mask, bool timestamp)
{
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

static void iio_buffer_activate(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
			&indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static int iio_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
		buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
			       "Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		kfree(mask);
}

struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

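/*
 * Work out the combined configuration (mode, watermark, scan mask, scan
 * size and timestamp setting) that would result from inserting
 * @insert_buffer and/or removing @remove_buffer, without applying it.
 */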
static int iio_verify_update(struct iio_dev *indio_dev,
	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
	struct iio_device_config *config)
{
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
		list_is_singular(&indio_dev->buffer_list))
			return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
			insert_buffer->watermark);
	}
	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&indio_dev->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
				    indio_dev->masklength,
763 764
				    compound_mask,
				    strict_scanmask);
765 766 767 768 769 770 771 772 773 774 775 776 777 778 779
		kfree(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
				    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{

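	/*
	 * Coalesce with the previous region when both the source and the
	 * destination are contiguous with it, otherwise start a new entry.
	 */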
	if (*p && (*p)->from + (*p)->length == in_loc &&
		(*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

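/*
 * Build the demux table for @buffer: a list of memcpy operations that copy
 * only the channels this buffer requested out of the device's full active
 * scan, keeping each element aligned to its storage size. No table is
 * needed when the buffer's scan mask matches the active scan mask.
 */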
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

static int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}

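/*
 * Bring the device up with the previously verified @config: install the
 * combined scan parameters, rebuild the demux tables, then run preenable,
 * update_scan_mode, the hardware FIFO watermark, the per-buffer enable
 * callbacks, switch to the new mode and finally postenable. Errors unwind
 * whatever steps already succeeded.
 */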
static int iio_enable_buffers(struct iio_dev *indio_dev,
	struct iio_device_config *config)
{
	struct iio_buffer *buffer;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret)
			goto err_disable_buffers;
	}

	indio_dev->currentmode = config->mode;

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
			       "Buffer not started: postenable failed (%d)\n", ret);
			goto err_disable_buffers;
		}
	}

	return 0;
err_disable_buffers:
	list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	indio_dev->currentmode = INDIO_DIRECT_MODE;

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;

	return ret;
}

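/*
 * Insert and/or remove a buffer from the device's active set: verify the
 * resulting configuration, tear down the currently running buffers, update
 * the buffer list and re-enable everything with the new configuration.
 * Must be called with indio_dev->mlock held.
 */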
static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
		&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;
err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state.  With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
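
/*
 * Illustrative (hypothetical) consumer usage: attach an additional buffer
 * to a device, or detach it again by passing it as the remove argument:
 *
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);
 *	...
 *	ret = iio_update_buffers(indio_dev, NULL, my_buffer);
 */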

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,