/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);

static void dev_pm_opp_get(struct dev_pm_opp *opp);

static struct opp_device *_find_opp_dev(const struct device *dev,
					struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev)
			return opp_dev;

	return NULL;
}

static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
	struct opp_table *opp_table;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (_find_opp_dev(dev, opp_table)) {
			_get_opp_table_kref(opp_table);

			return opp_table;
		}
	}

	return ERR_PTR(-ENODEV);
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev:	device pointer used to lookup OPP table
 *
 * Search OPP table for one containing matching device.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&opp_table_lock);
	opp_table = _find_opp_table_unlocked(dev);
	mutex_unlock(&opp_table_lock);

	return opp_table;
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp:	opp for which voltage has to be returned
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with single power supply.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->supplies[0].u_volt;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which frequency has to be returned
 *
 * Return: frequency in hertz corresponding to the opp, else
 * return 0
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->rate;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
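
/*
 * Editorial example (not part of the original file): a sketch of how a
 * caller that already holds an OPP reference might read its frequency and
 * voltage. The helper name is hypothetical.
 */
#if 0	/* illustrative sketch only */
static void example_print_opp(struct device *dev, struct dev_pm_opp *opp)
{
	unsigned long freq = dev_pm_opp_get_freq(opp);
	unsigned long volt = dev_pm_opp_get_voltage(opp);

	dev_info(dev, "OPP: %lu Hz at %lu uV\n", freq, volt);
}
#endif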

/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short duration of times to finish high throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long clock_latency_ns;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	clock_latency_ns = opp_table->clock_latency_ns_max;

	dev_pm_opp_put_opp_table(opp_table);

	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	count = opp_table->regulator_count;

	/* Regulator may not be required for the device */
	if (!count)
		goto put_opp_table;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto put_opp_table;

	mutex_lock(&opp_table->lock);

	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;

		list_for_each_entry(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
	}

	mutex_unlock(&opp_table->lock);

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

	kfree(uV);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to other.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
		dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);

/**
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0.
 */
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long freq = 0;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	if (opp_table->suspend_opp && opp_table->suspend_opp->available)
		freq = dev_pm_opp_get_freq(opp_table->suspend_opp);

	dev_pm_opp_put_opp_table(opp_table);

	return freq;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp;
	int count = 0;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_dbg(dev, "%s: OPP table not found (%d)\n",
			__func__, count);
		return count;
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
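
/*
 * Editorial example (not part of the original file): a sketch of sizing a
 * frequency table from the OPP count, as a cpufreq-style driver might do.
 * The helper name is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int example_alloc_freq_table(struct device *dev, unsigned long **table)
{
	int count = dev_pm_opp_get_opp_count(dev);

	if (count <= 0)
		return count ? count : -ENODEV;

	*table = kcalloc(count, sizeof(**table), GFP_KERNEL);
	if (!*table)
		return -ENOMEM;

	return count;
}
#endif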

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
 * @available:		true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available=true, then the
 * match is for exact matching frequency and is available in the stored OPP
 * table. If false, the match is for exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
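
/*
 * Editorial example (not part of the original file): a sketch of the typical
 * calling pattern for dev_pm_opp_find_freq_exact(), including the mandatory
 * dev_pm_opp_put() on the returned OPP. The helper name is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int example_check_exact(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;

	/* Look for an enabled OPP at exactly 'freq' */
	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* ... use the OPP ... */

	dev_pm_opp_put(opp);	/* drop the reference taken by the find */
	return 0;
}
#endif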

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	return opp;
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	opp = _find_freq_ceil(opp_table, freq);

	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
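
/*
 * Editorial example (not part of the original file): a sketch of the usual
 * ceil lookup; on success *freq is updated to the rate of the matching OPP.
 * The helper name is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int example_ceil_lookup(struct device *dev, unsigned long *freq)
{
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_ceil(dev, freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	dev_pm_opp_put(opp);	/* drop the reference taken by the find */
	return 0;
}
#endif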

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp))
		dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    struct dev_pm_opp_supply *supply)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);

	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);

	return ret;
}

static inline int
_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
			  unsigned long old_freq, unsigned long freq)
{
	int ret;

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
	}

	return ret;
}

static inline int
_generic_set_opp_domain(struct device *dev, struct clk *clk,
			unsigned long old_freq, unsigned long freq,
			unsigned int old_pstate, unsigned int new_pstate)
{
	int ret;

	/* Scaling up? Scale domain performance state before frequency */
	if (freq > old_freq) {
		ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
		if (ret)
			return ret;
	}

	ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
	if (ret)
		goto restore_domain_state;

	/* Scaling down? Scale domain performance state after frequency */
	if (freq < old_freq) {
		ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (_generic_set_opp_clk_only(dev, clk, freq, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_domain_state:
	if (freq > old_freq)
		dev_pm_genpd_set_performance_state(dev, old_pstate);

	return ret;
}

static int _generic_set_opp_regulator(const struct opp_table *opp_table,
				      struct device *dev,
				      unsigned long old_freq,
				      unsigned long freq,
				      struct dev_pm_opp_supply *old_supply,
				      struct dev_pm_opp_supply *new_supply)
{
	struct regulator *reg = opp_table->regulators[0];
	int ret;

	/* This function only supports single regulator per device */
	if (WARN_ON(opp_table->regulator_count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	/* Scaling up? Scale voltage before frequency */
	if (freq > old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */
	ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
	if (ret)
		goto restore_voltage;

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (old_supply)
		_set_opp_voltage(dev, reg, old_supply);

	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev:	 device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq, old_freq;
	struct dev_pm_opp *old_opp, *opp;
	struct clk *clk;
	int ret, size;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	clk = opp_table->clk;
	if (IS_ERR(clk)) {
		dev_err(dev, "%s: No clock available for the device\n",
			__func__);
		ret = PTR_ERR(clk);
		goto put_opp_table;
	}

	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		ret = 0;
		goto put_opp_table;
	}

	old_opp = _find_freq_ceil(opp_table, &old_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = _find_freq_ceil(opp_table, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		goto put_old_opp;
	}

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
		old_freq, freq);

	/* Only frequency scaling */
	if (!opp_table->regulators) {
		/*
		 * We don't support devices with both regulator and
		 * domain performance-state for now.
		 */
		if (opp_table->genpd_performance_state)
			ret = _generic_set_opp_domain(dev, clk, old_freq, freq,
						      IS_ERR(old_opp) ? 0 : old_opp->pstate,
						      opp->pstate);
		else
			ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
	} else if (!opp_table->set_opp) {
		ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
						 IS_ERR(old_opp) ? NULL : old_opp->supplies,
						 opp->supplies);
	} else {
		struct dev_pm_set_opp_data *data;

		data = opp_table->set_opp_data;
		data->regulators = opp_table->regulators;
		data->regulator_count = opp_table->regulator_count;
		data->clk = clk;
		data->dev = dev;

		data->old_opp.rate = old_freq;
		size = sizeof(*opp->supplies) * opp_table->regulator_count;
		if (IS_ERR(old_opp))
			memset(data->old_opp.supplies, 0, size);
		else
			memcpy(data->old_opp.supplies, old_opp->supplies, size);

		data->new_opp.rate = freq;
		memcpy(data->new_opp.supplies, opp->supplies, size);

		ret = opp_table->set_opp(data);
	}

	dev_pm_opp_put(opp);
put_old_opp:
	if (!IS_ERR(old_opp))
		dev_pm_opp_put(old_opp);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
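
/*
 * Editorial example (not part of the original file): a sketch of how a
 * cpufreq/devfreq style driver would typically drive a frequency change
 * through the OPP core. The helper name is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int example_scale(struct device *dev, unsigned long target_freq)
{
	/*
	 * The OPP core rounds target_freq via the clock framework, picks the
	 * matching OPP and orders the regulator and clock updates correctly,
	 * rolling back on failure.
	 */
	return dev_pm_opp_set_rate(dev, target_freq);
}
#endif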

/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;
	int ret;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

760 761
	/* Initialize opp-dev */
	opp_dev->dev = dev;
762
	list_add(&opp_dev->node, &opp_table->dev_list);
763

764 765
	/* Create debugfs entries for the opp_table */
	ret = opp_debug_register(opp_dev, opp_table);
Viresh Kumar's avatar
		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
			__func__, ret);

	return opp_dev;
}

static struct opp_table *_allocate_opp_table(struct device *dev)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return NULL;

	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		kfree(opp_table);
		return NULL;
	}

	_of_init_opp_table(opp_table, dev);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	mutex_init(&opp_table->lock);
	kref_init(&opp_table->kref);

	/* Secure the device table modification */
	list_add(&opp_table->node, &opp_tables);
	return opp_table;
}

void _get_opp_table_kref(struct opp_table *opp_table)
{
	kref_get(&opp_table->kref);
}

struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table_unlocked(dev);
	if (!IS_ERR(opp_table))
		goto unlock;

	opp_table = _allocate_opp_table(dev);

unlock:
	mutex_unlock(&opp_table_lock);

	return opp_table;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
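
/*
 * Editorial example (not part of the original file): a sketch showing that
 * dev_pm_opp_get_opp_table() must be balanced with dev_pm_opp_put_opp_table().
 * The helper name is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int example_with_table(struct device *dev)
{
	struct opp_table *opp_table;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return -ENOMEM;

	/* ... configure the table while holding the reference ... */

	dev_pm_opp_put_opp_table(opp_table);
	return 0;
}
#endif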

static void _opp_table_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
	struct opp_device *opp_dev;

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
				   node);

	_remove_opp_dev(opp_dev, opp_table);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&opp_table->dev_list));

	mutex_destroy(&opp_table->lock);
	list_del(&opp_table->node);
	kfree(opp_table);

	mutex_unlock(&opp_table_lock);
}

void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);

void _opp_free(struct dev_pm_opp *opp)
{
	kfree(opp);
}

static void _opp_kref_release(struct kref *kref)
{
	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
	struct opp_table *opp_table = opp->opp_table;

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
	opp_debug_remove_one(opp);
	list_del(&opp->node);
	kfree(opp);

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);
}

static void dev_pm_opp_get(struct dev_pm_opp *opp)
{
	kref_get(&opp->kref);
}

void dev_pm_opp_put(struct dev_pm_opp *opp)
{
	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);

/**
 * dev_pm_opp_remove()  - Remove an OPP from OPP table
 * @dev:	device for which we do this operation
 * @freq:	OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp table.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct opp_table *opp_table;
	bool found = false;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	if (found) {
		dev_pm_opp_put(opp);
	} else {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
	}

	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

struct dev_pm_opp *_opp_allocate(struct opp_table *table)
{
	struct dev_pm_opp *opp;
	int count, supply_size;

	/* Allocate space for at least one supply */
	count = table->regulator_count ? table->regulator_count : 1;
	supply_size = sizeof(*opp->supplies) * count;

	/* allocate new OPP node and supplies structures */
	opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
	if (!opp)
		return NULL;

	/* Put the supplies at the end of the OPP structure as an empty array */
	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
	INIT_LIST_HEAD(&opp->node);

	return opp;
}

static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
					 struct opp_table *opp_table)
{
	struct regulator *reg;
	int i;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];

		if (!regulator_is_supported_voltage(reg,
					opp->supplies[i].u_volt_min,
					opp->supplies[i].u_volt_max)) {
			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
				__func__, opp->supplies[i].u_volt_min,
				opp->supplies[i].u_volt_max);
			return false;
		}
	}

	return true;
}

/*
 * Returns:
 * 0: On success. And appropriate error message for duplicate OPPs.
 * -EBUSY: For OPP with same freq/volt and is available. The callers of
 *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
 *  sure we don't print error messages unnecessarily if different parts of
 *  kernel try to initialize the OPP table.
 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
 *  should be considered an error by the callers of _opp_add().
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	struct list_head *head;
	int ret;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	mutex_lock(&opp_table->lock);
	head = &opp_table->opp_list;

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->supplies[0].u_volt,
			 opp->available, new_opp->rate,
			 new_opp->supplies[0].u_volt, new_opp->available);

		/* Should we compare voltages for all regulators here ? */
		ret = opp->available &&
		      new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;

		mutex_unlock(&opp_table->lock);
		return ret;
	}

	if (opp_table->get_pstate)
		new_opp->pstate = opp_table->get_pstate(dev, new_opp->rate);

	list_add(&new_opp->node, head);
	mutex_unlock(&opp_table->lock);

	new_opp->opp_table = opp_table;
	kref_init(&new_opp->kref);

	/* Get a reference to the OPP table */
	_get_opp_table_kref(opp_table);

	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}

/**
 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
 * @opp_table:	OPP table
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 * @dynamic:	Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
		unsigned long freq, long u_volt, bool dynamic)
{
	struct dev_pm_opp *new_opp;
	unsigned long tol;
	int ret;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return -ENOMEM;

	/* populate the opp table */
	new_opp->rate = freq;
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_opp;
	}

	/*
	 * Notify the changes in the availability of the operable
	 * freque