task.c 62.7 KB
Newer Older
Philippe Gerum's avatar
Philippe Gerum committed
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/*
 * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.

 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
 */

#include <sched.h>
#include <pthread.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
24 25
#include "copperplate/heapobj.h"
#include "copperplate/internal.h"
26
#include "internal.h"
Philippe Gerum's avatar
Philippe Gerum committed
27
#include "task.h"
28 29
#include "buffer.h"
#include "queue.h"
Philippe Gerum's avatar
Philippe Gerum committed
30
#include "timer.h"
31
#include "heap.h"
Philippe Gerum's avatar
Philippe Gerum committed
32

33 34 35 36 37 38 39 40 41 42 43 44 45
/**
 * @ingroup alchemy
 * @defgroup alchemy_task Task management services
 *
 * Services dealing with preemptive multi-tasking
 *
 * Each Alchemy task is an independent portion of the overall
 * application code embodied in a C procedure, which executes on its
 * own stack context.
 *
 * @{
 */

46 47 48 49
union alchemy_wait_union {
	struct alchemy_task_wait task_wait;
	struct alchemy_buffer_wait buffer_wait;
	struct alchemy_queue_wait queue_wait;
50
	struct alchemy_heap_wait heap_wait;
51 52
};

53
struct syncluster alchemy_task_table;
Philippe Gerum's avatar
Philippe Gerum committed
54

55 56
static DEFINE_NAME_GENERATOR(task_namegen, "task",
			     struct alchemy_task, name);
Philippe Gerum's avatar
Philippe Gerum committed
57

58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95
#ifdef CONFIG_XENO_REGISTRY

/*
 * Registry open handler: take a consistent snapshot of the task state
 * under the TCB lock, then set up the obstack buffer handed back to
 * readers. No fields are pushed between init and finish here; the
 * stat call only gates success.
 */
static int task_registry_open(struct fsobj *fsobj, void *priv)
{
	struct fsobstack *stack = priv;
	struct threadobj_stat statbuf;
	struct alchemy_task *tcb;
	int err;

	tcb = container_of(fsobj, struct alchemy_task, fsobj);
	err = threadobj_lock(&tcb->thobj);
	if (err)
		return -EIO;

	err = threadobj_stat(&tcb->thobj, &statbuf);
	threadobj_unlock(&tcb->thobj);
	if (err)
		return err;

	fsobstack_init(stack);

	fsobstack_finish(stack);

	return 0;
}

static struct registry_operations registry_ops = {
	.open		= task_registry_open,
	.release	= fsobj_obstack_release,
	.read		= fsobj_obstack_read
};

#else /* !CONFIG_XENO_REGISTRY */

static struct registry_operations registry_ops;

#endif /* CONFIG_XENO_REGISTRY */

Philippe Gerum's avatar
Philippe Gerum committed
96 97 98 99
/*
 * Map a task descriptor to its TCB without locking it. Returns NULL
 * and sets *err_r to -EINVAL when the descriptor, the dereferenced
 * pointer or the magic word does not check out.
 */
static struct alchemy_task *find_alchemy_task(RT_TASK *task, int *err_r)
{
	struct alchemy_task *tcb;

	if (!bad_pointer(task)) {
		tcb = mainheap_deref(task->handle, struct alchemy_task);
		if (!bad_pointer(tcb) &&
		    threadobj_get_magic(&tcb->thobj) == task_magic)
			return tcb;
	}

	*err_r = -EINVAL;

	return NULL;
}

/*
 * Same as find_alchemy_task(), except that a NULL descriptor resolves
 * to the current Alchemy task; -EPERM is returned when the caller is
 * not one.
 */
static struct alchemy_task *find_alchemy_task_or_self(RT_TASK *task, int *err_r)
{
	struct alchemy_task *self;

	if (task != NULL)
		return find_alchemy_task(task, err_r);

	self = alchemy_task_current();
	if (self)
		return self;

	*err_r = -EPERM;

	return NULL;
}

/*
 * Resolve a task descriptor and return its TCB locked, or NULL with
 * *err_r set to -EINVAL.
 */
struct alchemy_task *get_alchemy_task(RT_TASK *task, int *err_r)
{
	struct alchemy_task *tcb;

	tcb = find_alchemy_task(task, err_r);
	if (tcb == NULL)
		goto invalid;

	/*
	 * Grab the task lock, assuming that the task might have been
	 * deleted, and/or maybe we have been lucky, and some random
	 * opaque pointer might lead us to something which is laid in
	 * valid memory but certainly not to a task object. Last
	 * chance is pthread_mutex_lock() detecting a wrong mutex kind
	 * and bailing out.
	 */
	if (threadobj_lock(&tcb->thobj) == -EINVAL)
		goto invalid;

	/* Check the magic word again, while we hold the lock. */
	if (threadobj_get_magic(&tcb->thobj) == task_magic)
		return tcb;

	threadobj_unlock(&tcb->thobj);
invalid:
	*err_r = -EINVAL;

	return NULL;
}

/*
 * Same as get_alchemy_task(), except that a NULL descriptor resolves
 * to the current Alchemy task (-EPERM when the caller is not one).
 * The TCB is returned locked.
 */
struct alchemy_task *get_alchemy_task_or_self(RT_TASK *task, int *err_r)
{
	struct alchemy_task *self;

	if (task != NULL)
		return get_alchemy_task(task, err_r);

	self = alchemy_task_current();
	if (self == NULL) {
		*err_r = -EPERM;
		return NULL;
	}

	/* This one might block but can't fail, it is ours. */
	threadobj_lock(&self->thobj);

	return self;
}

/* Drop the TCB lock acquired by get_alchemy_task{_or_self}(). */
void put_alchemy_task(struct alchemy_task *tcb)
{
	threadobj_unlock(&tcb->thobj);
}

static void task_finalizer(struct threadobj *thobj)
{
	struct alchemy_task *tcb;
185
	struct syncstate syns;
186
	int ret;
Philippe Gerum's avatar
Philippe Gerum committed
187 188

	tcb = container_of(thobj, struct alchemy_task, thobj);
189
	registry_destroy_file(&tcb->fsobj);
190
	syncluster_delobj(&alchemy_task_table, &tcb->cobj);
191
	/*
192 193
	 * The msg sync may be pended by other threads, so we do have
	 * to use syncobj_destroy() on it (i.e. NOT syncobj_uninit()).
194
	 */
195 196 197
	ret = __bt(syncobj_lock(&tcb->sobj_msg, &syns));
	if (ret == 0)
		syncobj_destroy(&tcb->sobj_msg, &syns);
Philippe Gerum's avatar
Philippe Gerum committed
198 199
}

200 201 202 203 204 205 206 207
/*
 * First-stage prologue, run on the new thread context: binds the
 * thread to its thread object via threadobj_prologue(), under the
 * generated task name.
 */
static int task_prologue_1(void *arg)
{
	struct alchemy_task *tcb = arg;

	return __bt(threadobj_prologue(&tcb->thobj, tcb->name));
}

static int task_prologue_2(struct alchemy_task *tcb)
Philippe Gerum's avatar
Philippe Gerum committed
208 209 210
{
	int ret;

211
	threadobj_wait_start();
212
	threadobj_lock(&tcb->thobj);
213
	ret = threadobj_set_mode(0, tcb->mode, NULL);
214
	threadobj_unlock(&tcb->thobj);
Philippe Gerum's avatar
Philippe Gerum committed
215

216
	return ret;
Philippe Gerum's avatar
Philippe Gerum committed
217 218
}

219
static void *task_entry(void *arg)
Philippe Gerum's avatar
Philippe Gerum committed
220 221
{
	struct alchemy_task *tcb = arg;
222
	struct service svc;
Philippe Gerum's avatar
Philippe Gerum committed
223 224
	int ret;

225 226
	CANCEL_DEFER(svc);

227
	ret = __bt(task_prologue_2(tcb));
228 229
	if (ret) {
		CANCEL_RESTORE(svc);
230
		return (void *)(long)ret;
231
	}
Philippe Gerum's avatar
Philippe Gerum committed
232

233
	threadobj_notify_entry();
234 235 236

	CANCEL_RESTORE(svc);

Philippe Gerum's avatar
Philippe Gerum committed
237
	tcb->entry(tcb->arg);
238

239
	return NULL;
Philippe Gerum's avatar
Philippe Gerum committed
240 241
}

242 243 244
/*
 * Tear down a TCB which never went live (thread creation or shadowing
 * failed): drop the msg syncobj, the thread object, then the backing
 * memory.
 */
static void delete_tcb(struct alchemy_task *tcb)
{
	syncobj_uninit(&tcb->sobj_msg);
	threadobj_uninit(&tcb->thobj);
	threadobj_free(&tcb->thobj);
}

249
/*
 * Allocate and initialize a task control block, publish it to the
 * registry and the task cluster, and write the resulting handle back
 * to @task (when non-NULL). Returns 0, or a negated error code;
 * nothing is left allocated on failure.
 */
static int create_tcb(struct alchemy_task **tcbp, RT_TASK *task,
		      const char *name, int prio, int mode)
{
	struct threadobj_init_data init;
	struct alchemy_task *tcb;
	int err;

	/* Task creation from interrupt context is not allowed. */
	if (threadobj_irq_p())
		return -EPERM;

	err = check_task_priority(prio);
	if (err)
		return err;

	tcb = threadobj_alloc(struct alchemy_task, thobj,
			      union alchemy_wait_union);
	if (tcb == NULL)
		return -ENOMEM;

	generate_name(tcb->name, name, &task_namegen);

	tcb->mode = mode;
	tcb->entry = NULL;	/* Not yet known. */
	tcb->arg = NULL;
	CPU_ZERO(&tcb->affinity);

	err = syncobj_init(&tcb->sobj_msg, CLOCK_COPPERPLATE,
			   SYNCOBJ_PRIO, fnref_null);
	if (err)
		goto fail_syncinit;

	tcb->suspends = 0;
	tcb->flowgen = 0;

	init.magic = task_magic;
	init.finalizer = task_finalizer;
	init.policy = prio ? SCHED_FIFO : SCHED_OTHER;
	init.param_ex.sched_priority = prio;
	err = threadobj_init(&tcb->thobj, &init);
	if (err)
		goto fail_threadinit;

	*tcbp = tcb;

	/*
	 * CAUTION: The task control block must be fully built before
	 * we publish it through syncluster_addobj(), at which point
	 * it could be referred to immediately from another task as we
	 * got preempted. In addition, the task descriptor must be
	 * updated prior to starting the task.
	 */
	tcb->self.handle = mainheap_ref(tcb, uintptr_t);

	registry_init_file_obstack(&tcb->fsobj, &registry_ops);
	err = __bt(registry_add_file(&tcb->fsobj, O_RDONLY,
				     "/alchemy/tasks/%s", tcb->name));
	if (err)
		/* Registry export is best-effort only. */
		warning("failed to export task %s to registry, %s",
			tcb->name, symerror(err));

	err = syncluster_addobj(&alchemy_task_table, tcb->name, &tcb->cobj);
	if (err)
		goto fail_register;

	if (task)
		task->handle = tcb->self.handle;

	return 0;

fail_register:
	registry_destroy_file(&tcb->fsobj);
	threadobj_uninit(&tcb->thobj);
fail_threadinit:
	syncobj_uninit(&tcb->sobj_msg);
fail_syncinit:
	threadobj_free(&tcb->thobj);

	return err;
}

330 331
/**
 * @fn int rt_task_create(RT_TASK *task, const char *name, int stksize, int prio, int mode)
332
 * @brief Create a task with Alchemy personality.
333
 *
334 335 336 337
 * This service creates a task with access to the full set of Alchemy
 * services. If @a prio is non-zero, the new task belongs to Xenomai's
 * real-time FIFO scheduling class, aka SCHED_FIFO. If @a prio is
 * zero, the task belongs to the regular SCHED_OTHER class.
338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362
 *
 * Creating tasks with zero priority is useful for running non
 * real-time processes which may invoke blocking real-time services,
 * such as pending on a semaphore, reading from a message queue or a
 * buffer, and so on.
 *
 * Once created, the task is left dormant until it is actually started
 * by rt_task_start().
 *
 * @param task The address of a task descriptor which can be later
 * used to identify uniquely the created object, upon success of this
 * call.
 *
 * @param name An ASCII string standing for the symbolic name of the
 * task. When non-NULL and non-empty, a copy of this string is
 * used for indexing the created task into the object registry.
 *
 * @param stksize The size of the stack (in bytes) for the new
 * task. If zero is passed, a system-dependent default size will be
 * substituted.
 *
 * @param prio The base priority of the new task. This value must be
 * in the [0 .. 99] range, where 0 is the lowest effective priority. 
 *
 * @param mode The task creation mode. The following flags can be
363
 * OR'ed into this bitmask:
364 365 366 367 368
 *
 * - T_JOINABLE allows another task to wait on the termination of the
 * new task. rt_task_join() shall be called for this task to clean up
 * any resources after its termination.
 *
369 370 371 372
 * - T_LOCK causes the new task to lock the scheduler prior to
 * entering the user routine specified by rt_task_start(). A call to
 * rt_task_set_mode() from the new task is required to drop this lock.
 *
373
 * - When running over the Cobalt core, T_WARNSW causes the SIGDEBUG
374 375 376 377 378
 * signal to be sent to the current task whenever it switches to the
 * secondary mode. This feature is useful to detect unwanted
 * migrations to the Linux domain. This flag has no effect over the
 * Mercury core.
 *
379
 * @return Zero is returned upon success. Otherwise:
380 381 382 383 384 385 386 387 388 389
 *
 * - -EINVAL is returned if either @a prio, @a mode or @a stksize are
 * invalid.
 *
 * - -ENOMEM is returned if the system fails to get memory from the
 * main heap in order to create the task.
 *
 * - -EEXIST is returned if the @a name is conflicting with an already
 * registered task.
 *
390 391 392 393
 * - -EPERM is returned if this service was called from an invalid
 * context, e.g. interrupt or non-Xenomai thread.
 *
 * @apitags{xthread-only, mode-unrestricted, switch-secondary}
394
 *
395 396
 * @sideeffect
 * - When running over the Cobalt core:
397
 *
398
 *   - calling rt_task_create() causes SCHED_FIFO tasks to switch to
399 400
 * secondary mode.
 *
401
 *   - members of Xenomai's SCHED_FIFO class running in the primary
402 403
 * domain have utmost priority over all Linux activities in the
 * system, including Linux interrupt handlers.
404
 *
405
 * - When running over the Mercury core, the new task belongs to the
406
 * regular POSIX SCHED_FIFO class.
407 408 409 410
 *
 * @note Tasks can be referred to from multiple processes which all
 * belong to the same Xenomai session.
 */
411
#ifndef DOXYGEN_CPP
412 413
CURRENT_IMPL(int, rt_task_create, (RT_TASK *task, const char *name,
				   int stksize, int prio, int mode))
414 415 416 417
#else
int rt_task_create(RT_TASK *task, const char *name,
		   int stksize, int prio, int mode)
#endif
Philippe Gerum's avatar
Philippe Gerum committed
418
{
419
	struct corethread_attributes cta;
Philippe Gerum's avatar
Philippe Gerum committed
420 421
	struct alchemy_task *tcb;
	struct service svc;
422
	int ret;
Philippe Gerum's avatar
Philippe Gerum committed
423

424 425 426
	if (mode & ~(T_LOCK | T_WARNSW | T_JOINABLE))
		return -EINVAL;

427
	CANCEL_DEFER(svc);
Philippe Gerum's avatar
Philippe Gerum committed
428

429
	ret = create_tcb(&tcb, task, name, prio, mode);
Philippe Gerum's avatar
Philippe Gerum committed
430 431 432 433
	if (ret)
		goto out;

	/* We want this to be set prior to spawning the thread. */
Philippe Gerum's avatar
Philippe Gerum committed
434
	tcb->self = *task;
Philippe Gerum's avatar
Philippe Gerum committed
435

436
	cta.detachstate = mode & T_JOINABLE ?
437
		PTHREAD_CREATE_JOINABLE : PTHREAD_CREATE_DETACHED;
438 439
	cta.policy = threadobj_get_policy(&tcb->thobj);
	threadobj_copy_schedparam(&cta.param_ex, &tcb->thobj);
440 441
	cta.prologue = task_prologue_1;
	cta.run = task_entry;
442 443
	cta.arg = tcb;
	cta.stacksize = stksize;
444

445
	ret = __bt(copperplate_create_thread(&cta, &tcb->thobj.ptid));
446
	if (ret) {
447
		delete_tcb(tcb);
448 449
	} else {
		tcb->self.thread = tcb->thobj.ptid;
450
		task->thread = tcb->thobj.ptid;
451
	}
Philippe Gerum's avatar
Philippe Gerum committed
452
out:
453
	CANCEL_RESTORE(svc);
Philippe Gerum's avatar
Philippe Gerum committed
454 455 456 457

	return ret;
}

458 459 460 461 462 463 464 465 466 467 468
/**
 * @fn int rt_task_delete(RT_TASK *task)
 * @brief Delete a real-time task.
 *
 * This call terminates a task previously created by
 * rt_task_create().
 *
 * Tasks created with the T_JOINABLE flag shall be joined by a
 * subsequent call to rt_task_join() once successfully deleted, to
 * reclaim all resources.
 *
469
 * @param task The task descriptor.
470 471 472 473 474
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a task is not a valid task descriptor.
 *
475
 * - -EPERM is returned if @a task is NULL and this service was called
476 477 478
 * from an invalid context. In addition, this error is always raised
 * when this service is called from asynchronous context, such as a
 * timer/alarm handler.
479
 *
480
 * @apitags{mode-unrestricted, switch-secondary}
481
 *
482
 * @note The caller must be an Alchemy task if @a task is NULL.
483
 */
Philippe Gerum's avatar
Philippe Gerum committed
484 485 486 487 488 489
int rt_task_delete(RT_TASK *task)
{
	struct alchemy_task *tcb;
	struct service svc;
	int ret;

Gilles Chanteperdrix's avatar
Gilles Chanteperdrix committed
490
	if (threadobj_irq_p())
Philippe Gerum's avatar
Philippe Gerum committed
491 492 493 494 495 496
		return -EPERM;

	tcb = find_alchemy_task_or_self(task, &ret);
	if (tcb == NULL)
		return ret;

497
	CANCEL_DEFER(svc);
498
	threadobj_lock(&tcb->thobj);
499 500
	/* Self-deletion is handled by threadobj_cancel(). */
	threadobj_cancel(&tcb->thobj);
501
	CANCEL_RESTORE(svc);
Philippe Gerum's avatar
Philippe Gerum committed
502

503
	return 0;
Philippe Gerum's avatar
Philippe Gerum committed
504 505
}

506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521
/**
 * @fn int rt_task_join(RT_TASK *task)
 * @brief Wait on the termination of a real-time task.
 *
 * This service blocks the caller in non-real-time context until @a
 * task has terminated. All resources are released after successful
 * completion of this service.
 *
 * The specified task must have been created by the same process that
 * wants to join it, and the T_JOINABLE mode flag must have been set
 * on creation to rt_task_create().
 *
 * @param task The task descriptor.
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a task is not a valid task descriptor.
 *
 * - -EINVAL is returned if the task was not created with T_JOINABLE
 * set or some other task is already waiting on the termination.
 *
 * - -EDEADLK is returned if @a task refers to the caller.
 *
 * - -ESRCH is returned if @a task no longer exists or refers to task
 * created by a different process.
 *
 * @apitags{mode-unrestricted, switch-primary}
 *
 * @note After successful completion of this service, it is neither
 * required nor valid to additionally invoke rt_task_delete() on the
 * same task.
 */
int rt_task_join(RT_TASK *task)
{
	int err;

	if (bad_pointer(task))
		return -EINVAL;

	/* pthread_join() returns a positive errno value; negate it. */
	err = __RT(pthread_join(task->thread, NULL));

	return -err;
}

550 551 552 553 554 555 556
/**
 * @fn int rt_task_set_affinity(RT_TASK *task, const cpu_set_t *cpus)
 * @brief Set CPU affinity of real-time task.
 *
 * This calls makes @a task affine to the set of CPUs defined by @a
 * cpus.
 *
 * @param task The task descriptor.  If @a task is NULL, the CPU
 * affinity of the current task is changed.
 *
 * @param cpus The set of CPUs @a task should be affine to.
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a task is NULL but the caller is not a
 * Xenomai task, or if @a task is non-NULL but not a valid task
 * descriptor.
 *
 * - -EINVAL is returned if @a cpus contains no processors that are
 * currently physically on the system and permitted to the process
 * according to any restrictions that may be imposed by the "cpuset"
 * mechanism described in cpuset(7).
 *
 * @apitags{mode-unrestricted, switch-secondary}
 *
 * @note The caller must be an Alchemy task if @a task is NULL.
 */
int rt_task_set_affinity(RT_TASK *task, const cpu_set_t *cpus)
{
	struct alchemy_task *tcb;
	struct service svc;
	int err;

	CANCEL_DEFER(svc);

	tcb = get_alchemy_task_or_self(task, &err);
	if (tcb == NULL)
		goto out;

	/* Record the new mask, then apply it to the host thread. */
	tcb->affinity = *cpus;

	err = sched_setaffinity(threadobj_get_pid(&tcb->thobj),
				sizeof(tcb->affinity), &tcb->affinity);
	if (err)
		err = -errno;

	put_alchemy_task(tcb);
out:
	CANCEL_RESTORE(svc);

	return err;
}

603 604 605 606 607 608 609 610
/**
 * @fn int rt_task_start(RT_TASK *task, void (*entry)(void *arg), void *arg)
 * @brief Start a real-time task.
 *
 * This call starts execution of a task previously created by
 * rt_task_create(). This service causes the started task to leave the
 * initial dormant state.
 *
611
 * @param task The task descriptor.
612 613 614 615 616 617 618 619 620
 *
 * @param entry The address of the task entry point.
 *
 * @param arg A user-defined opaque argument @a entry will receive.
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a task is not a valid task descriptor.
 *
621
 * @apitags{mode-unrestricted, switch-primary}
622 623 624 625
 *
 * @note Starting an already started task leads to a nop, returning a
 * success status.
 */
Philippe Gerum's avatar
Philippe Gerum committed
626 627 628 629 630
int rt_task_start(RT_TASK *task,
		  void (*entry)(void *arg),
		  void *arg)
{
	struct alchemy_task *tcb;
631
	struct service svc;
632
	int ret;
Philippe Gerum's avatar
Philippe Gerum committed
633

634
	CANCEL_DEFER(svc);
635

Philippe Gerum's avatar
Philippe Gerum committed
636 637
	tcb = get_alchemy_task(task, &ret);
	if (tcb == NULL)
638
		goto out;
Philippe Gerum's avatar
Philippe Gerum committed
639 640 641

	tcb->entry = entry;
	tcb->arg = arg;
642 643 644 645 646 647 648 649 650
	ret = threadobj_start(&tcb->thobj);
	if (ret == -EIDRM)
		/*
		 * The started thread has run then exited, tcb->thobj
		 * is stale: don't touch it anymore.
		 */
		ret = 0;
	else
		put_alchemy_task(tcb);
651
out:
652
	CANCEL_DEFER(svc);
Philippe Gerum's avatar
Philippe Gerum committed
653

654
	return ret;
Philippe Gerum's avatar
Philippe Gerum committed
655 656
}

657 658 659 660
/**
 * @fn int rt_task_shadow(RT_TASK *task, const char *name, int prio, int mode)
 * @brief Turn caller into a real-time task.
 *
661 662 663
 * Set the calling thread personality to the Alchemy API, enabling the
 * full set of Alchemy services. Upon success, the caller is no more a
 * regular POSIX thread, but a Xenomai-extended thread.
664 665
 *
 * If @a prio is non-zero, the new task moves to Xenomai's real-time
666
 * FIFO scheduling class, aka SCHED_FIFO. If @a prio is zero, the task
667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684
 * moves to the regular SCHED_OTHER class.
 *
 * Running Xenomai tasks with zero priority is useful for running non
 * real-time processes which may invoke blocking real-time services,
 * such as pending on a semaphore, reading from a message queue or a
 * buffer, and so on.
 *
 * @param task If non-NULL, the address of a task descriptor which can
 * be later used to identify uniquely the task, upon success of this
 * call. If NULL, no descriptor is returned.
 *
 * @param name An ASCII string standing for the symbolic name of the
 * task. When non-NULL and non-empty, a copy of this string is
 * used for indexing the task into the object registry.
 *
 * @param prio The base priority of the task. This value must be in
 * the [0 .. 99] range, where 0 is the lowest effective priority.
 *
685 686
 * @param mode The task shadowing mode. The following flags can be
 * OR'ed into this bitmask:
687
 *
688
 * - T_LOCK causes the current task to lock the scheduler before
689 690
 * returning to the caller, preventing all further involuntary task
 * switches on the current CPU. A call to rt_task_set_mode() from the
691 692
 * current task is required to drop this lock.
 *
693
 * - When running over the Cobalt core, T_WARNSW causes the SIGDEBUG
694 695 696 697
 * signal to be sent to the current task whenever it switches to the
 * secondary mode. This feature is useful to detect unwanted
 * migrations to the Linux domain. This flag has no effect over the
 * Mercury core.
698
 *
699
 * @return Zero is returned upon success. Otherwise:
700
 *
701
 * - -EINVAL is returned if @a prio is invalid.
702 703 704 705 706 707 708
 *
 * - -ENOMEM is returned if the system fails to get memory from the
 * main heap in order to create the task extension.
 *
 * - -EEXIST is returned if the @a name is conflicting with an already
 * registered task.
 *
709
 * - -EBUSY is returned if the caller is not a regular POSIX thread.
710
 *
711 712 713
 * - -EPERM is returned if this service was called from an invalid
 * context, e.g. interrupt handler.
 *
714
 * @apitags{pthread-only, switch-secondary, switch-primary}
715
 *
716 717 718 719
 * @sideeffect Over Cobalt, if the caller is a plain POSIX thread, it
 * is turned into a Xenomai _shadow_ thread, with full access to all
 * Cobalt services. The caller always returns from this service in
 * primary mode.
720 721 722 723
 *
 * @note Tasks can be referred to from multiple processes which all
 * belong to the same Xenomai session.
 */
Philippe Gerum's avatar
Philippe Gerum committed
724 725
int rt_task_shadow(RT_TASK *task, const char *name, int prio, int mode)
{
726
	struct threadobj *current = threadobj_current();
727
	struct sched_param_ex param_ex;
Philippe Gerum's avatar
Philippe Gerum committed
728 729
	struct alchemy_task *tcb;
	struct service svc;
730
	int policy, ret;
731
	pthread_t self;
Philippe Gerum's avatar
Philippe Gerum committed
732

733 734 735
	if (mode & ~(T_LOCK | T_WARNSW))
		return -EINVAL;

736
	CANCEL_DEFER(svc);
Philippe Gerum's avatar
Philippe Gerum committed
737

738 739 740 741 742 743 744 745 746
	/*
	 * This is ok to overlay the default TCB for the main thread
	 * assigned by Copperplate at init, but it is not to
	 * over-shadow a Xenomai thread. A valid TCB pointer with a
	 * zero magic identifies the default main TCB.
	 */
	if (current && threadobj_get_magic(current))
		return -EBUSY;

747 748 749 750 751 752 753 754 755 756 757 758 759 760
	/*
	 * Over Cobalt, the following call turns the current context
	 * into a dual-kernel thread. Do this early, since this will
	 * be required next for creating the TCB and running the
	 * prologue code (i.e. real-time mutexes and monitors are
	 * locked there).
	 */
	self = pthread_self();
	policy = prio ? SCHED_FIFO : SCHED_OTHER;
	param_ex.sched_priority = prio;
	ret = __bt(copperplate_renice_local_thread(self, policy, &param_ex));
	if (ret)
		goto out;

761
	ret = create_tcb(&tcb, task, name, prio, mode);
Philippe Gerum's avatar
Philippe Gerum committed
762 763 764
	if (ret)
		goto out;

765
	CANCEL_RESTORE(svc);
766

767 768 769
	if (task)
		task->thread = self;

770
	ret = threadobj_shadow(&tcb->thobj, tcb->name);
771 772 773
	if (ret)
		goto undo;

774 775
	CANCEL_DEFER(svc);

776 777 778
	ret = task_prologue_2(tcb);
	if (ret)
		goto undo;
Philippe Gerum's avatar
Philippe Gerum committed
779
out:
780
	CANCEL_RESTORE(svc);
Philippe Gerum's avatar
Philippe Gerum committed
781 782

	return ret;
783 784 785
undo:
	delete_tcb(tcb);
	goto out;
Philippe Gerum's avatar
Philippe Gerum committed
786 787
}

788 789 790 791 792 793 794 795 796
/**
 * @fn int rt_task_set_periodic(RT_TASK *task, RTIME idate, RTIME period)
 * @brief Make a real-time task periodic.
 *
 * Make a task periodic by programing its first release point and its
 * period in the processor time line.  @a task should then call
 * rt_task_wait_period() to sleep until the next periodic release
 * point in the processor timeline is reached.
 *
797 798
 * @param task The task descriptor.  If @a task is NULL, the current
 * task is made periodic. @a task must belong the current process.
799 800
 *
 * @param idate The initial (absolute) date of the first release
801 802
 * point, expressed in clock ticks (see note).  If @a idate is equal
 * to TM_NOW, the current system date is used.
803 804 805 806 807
 *
 * @param period The period of the task, expressed in clock ticks (see
 * note). Passing TM_INFINITE stops the task's periodic timer if
 * enabled, then returns successfully.
 *
808
 * @return Zero is returned upon success. Otherwise:
809 810 811 812 813 814 815 816
 *
 * - -EINVAL is returned if @a task is NULL but the caller is not a
 * Xenomai task, or if @a task is non-NULL but not a valid task
 * descriptor.
 *
 * - -ETIMEDOUT is returned if @a idate is different from TM_INFINITE
 * and represents a date in the past.
 *
817
 * @apitags{mode-unrestricted, switch-primary}
818
 *
819
 * @note The caller must be an Alchemy task if @a task is NULL.
820
 *
821
 * @note Over Cobalt, -EINVAL is returned if @a period is
822 823 824
 * different from TM_INFINITE but shorter than the user scheduling
 * latency value for the target system, as displayed by
 * /proc/xenomai/latency.
825 826 827 828
 *
 * @note The @a idate and @a period values are interpreted as a
 * multiple of the Alchemy clock resolution (see
 * --alchemy-clock-resolution option, defaults to 1 nanosecond).
829 830 831 832 833
 *
 * @attention Unlike its Xenomai 2.x counterpart,
 * rt_task_set_periodic() will @b NOT block @a task until @a idate is
 * reached. The first beat in the periodic timeline should be awaited
 * for by a call to rt_task_wait_period().
834
 */
835
#ifndef DOXYGEN_CPP
836 837
CURRENT_IMPL(int, rt_task_set_periodic,
	     (RT_TASK *task, RTIME idate, RTIME period))
838 839 840
#else
int rt_task_set_periodic(RT_TASK *task, RTIME idate, RTIME period)
#endif
Philippe Gerum's avatar
Philippe Gerum committed
841
{
842
	struct timespec its, pts, now;
Philippe Gerum's avatar
Philippe Gerum committed
843 844 845 846
	struct alchemy_task *tcb;
	struct service svc;
	int ret;

847
	CANCEL_DEFER(svc);
Philippe Gerum's avatar
Philippe Gerum committed
848

849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865
	if (period == TM_INFINITE) {
		pts.tv_sec = 0;
		pts.tv_nsec = 0;
		its = pts;
	} else {
		clockobj_ticks_to_timespec(&alchemy_clock, period, &pts);
		if (idate == TM_NOW) {
			__RT(clock_gettime(CLOCK_COPPERPLATE, &now));
			timespec_add(&its, &now, &pts);
		} else
			/*
			 * idate is an absolute time specification
			 * already, so we want a direct conversion to
			 * timespec.
			 */
			clockobj_ticks_to_timespec(&alchemy_clock, idate, &its);
	}
866 867

	tcb = get_alchemy_task_or_self(task, &ret);
Philippe Gerum's avatar
Philippe Gerum committed
868 869 870
	if (tcb == NULL)
		goto out;

871 872 873 874 875
	if (!threadobj_local_p(&tcb->thobj)) {
		ret = -EINVAL;
		goto out;
	}

Philippe Gerum's avatar
Philippe Gerum committed
876
	ret = threadobj_set_periodic(&tcb->thobj, &its, &pts);
877
	put_alchemy_task(tcb);
Philippe Gerum's avatar
Philippe Gerum committed
878
out:
879
	CANCEL_RESTORE(svc);
Philippe Gerum's avatar
Philippe Gerum committed
880 881 882 883

	return ret;
}

/**
 * @fn int rt_task_wait_period(unsigned long *overruns_r)
 * @brief Wait for the next periodic release point.
 *
 * Delay the current task until the next periodic release point is
 * reached. The periodic timer should have been previously started for
 * @a task by a call to rt_task_set_periodic().
 *
 * @param overruns_r If non-NULL, @a overruns_r shall be a pointer to
 * a memory location which will be written with the count of pending
 * overruns. This value is written to only when rt_task_wait_period()
 * returns -ETIMEDOUT or success. The memory location remains
 * unmodified otherwise. If NULL, this count will not be returned.
 *
 * @return Zero is returned upon success. If @a overruns_r is
 * non-NULL, zero is written to the pointed memory
 * location. Otherwise:
 *
 * - -EWOULDBLOCK is returned if rt_task_set_periodic() was not called
 * for the current task.
 *
 * - -EINTR is returned if rt_task_unblock() was called for the
 * waiting task before the next periodic release point was reached. In
 * this case, the overrun counter is also cleared.
 *
 * - -ETIMEDOUT is returned if a timer overrun occurred, which
 * indicates that a previous release point was missed by the calling
 * task. If @a overruns_r is non-NULL, the count of pending overruns
 * is written to the pointed memory location.
 *
 * - -EPERM is returned if this service was called from an invalid
 * context.
 *
 * @apitags{xthread-only, switch-primary}
 *
 * @note If the current release point has already been reached at the
 * time of the call, the current task immediately returns from this
 * service with no delay.
 */
int rt_task_wait_period(unsigned long *overruns_r)
{
	/* Only a Xenomai thread may wait on its periodic timer. */
	if (threadobj_current_p())
		return threadobj_wait_period(overruns_r);

	return -EPERM;
}

/**
 * @fn int rt_task_sleep_until(RTIME date)
 * @brief Delay the current real-time task (with absolute wakeup date).
 *
 * Delay the execution of the calling task until a given date is
 * reached. The caller is put to sleep, and does not consume any CPU
 * time in such a state.
 *
 * @param date An absolute date expressed in clock ticks, specifying a
 * wakeup date (see note). As a special case, TM_INFINITE is an
 * acceptable value that causes the caller to block indefinitely,
 * until rt_task_unblock() is called against it. Otherwise, any wake
 * up date in the past causes the task to return immediately.
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINTR is returned if rt_task_unblock() was called for the
 * current task.
 *
 * - -ETIMEDOUT is returned if @a date has already elapsed.
 *
 * - -EPERM is returned if this service was called from an invalid
 * context.
 *
 * @apitags{xthread-only, switch-primary}
 *
 * @note This service may only be called from a Xenomai thread
 * context.
 *
 * @note The @a date value is interpreted as a multiple of the Alchemy
 * clock resolution (see --alchemy-clock-resolution option, defaults
 * to 1 nanosecond).
 */
int rt_task_sleep_until(RTIME date)
{
	struct timespec ts;
	struct service svc;
	ticks_t now;

	if (!threadobj_current_p())
		return -EPERM;

	if (date == TM_INFINITE)
		/*
		 * NOTE(review): zero_time is presumably interpreted
		 * by threadobj_sleep() as an unbounded sleep —
		 * confirm against the threadobj implementation.
		 */
		ts = zero_time;
	else {
		now = clockobj_get_time(&alchemy_clock);
		if (date <= now)
			return -ETIMEDOUT;
		/* Keep cancellation deferred over the conversion. */
		CANCEL_DEFER(svc);
		clockobj_ticks_to_timespec(&alchemy_clock, date, &ts);
		CANCEL_RESTORE(svc);
	}

	return threadobj_sleep(&ts);
}
Philippe Gerum's avatar
Philippe Gerum committed
985

986 987 988 989 990 991 992 993 994 995 996 997 998
/**
 * @fn int rt_task_sleep(RTIME delay)
 * @brief Delay the current real-time task (with relative delay).
 *
 * This routine is a variant of rt_task_sleep_until() accepting a
 * relative timeout specification.
 *
 * @param delay A relative delay expressed in clock ticks (see
 * note). A zero delay causes this service to return immediately to
 * the caller with a success status.
 *
 * @return See rt_task_sleep_until().
 *
999 1000
 * @apitags{xthread-only, switch-primary}
 *
1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015
 * @note The @a delay value is interpreted as a multiple of the
 * Alchemy clock resolution (see --alchemy-clock-resolution option,
 * defaults to 1 nanosecond).
 */
int rt_task_sleep(RTIME delay)
{
	struct timespec ts;
	struct service svc;

	if (!threadobj_current_p())
		return -EPERM;

	if (delay == 0)
		return 0;

1016
	CANCEL_DEFER(svc);
1017
	clockobj_ticks_to_timeout(&alchemy_clock, delay, &ts);
1018
	CANCEL_RESTORE(svc);
1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052

	return threadobj_sleep(&ts);
}

/**
 * @fn int rt_task_spawn(RT_TASK *task, const char *name, int stksize, int prio, int mode, void (*entry)(void *arg), void *arg)
 * @brief Create and start a real-time task.
 *
 * This service spawns a task by combining calls to rt_task_create()
 * and rt_task_start() for the new task.
 *
 * @param task The address of a task descriptor which can be later
 * used to identify uniquely the created object, upon success of this
 * call.
 *
 * @param name An ASCII string standing for the symbolic name of the
 * task. When non-NULL and non-empty, a copy of this string is
 * used for indexing the created task into the object registry.
 *
 * @param stksize The size of the stack (in bytes) for the new
 * task. If zero is passed, a system-dependent default size will be
 * substituted.
 *
 * @param prio The base priority of the new task. This value must be
 * in the [0 .. 99] range, where 0 is the lowest effective priority. 
 *
 * @param mode The task creation mode. See rt_task_create().
 *
 * @param entry The address of the task entry point.
 *
 * @param arg A user-defined opaque argument @a entry will receive.
 *
 * @return See rt_task_create().
 *
1053
 * @apitags{mode-unrestricted, switch-secondary}
1054
 *
1055
 * @sideeffect see rt_task_create().
1056
 */
1057
#ifndef DOXYGEN_CPP
1058 1059 1060 1061
CURRENT_IMPL(int, rt_task_spawn, (RT_TASK *task, const char *name,
				  int stksize, int prio, int mode,
				  void (*entry)(void *arg),
				  void *arg))
1062 1063 1064 1065 1066 1067
#else
int rt_task_spawn(RT_TASK *task, const char *name,
		  int stksize, int prio, int mode,
		  void (*entry)(void *arg),
		  void *arg)
#endif
Philippe Gerum's avatar
Philippe Gerum committed
1068 1069 1070 1071 1072 1073 1074 1075 1076 1077
{
	int ret;

	ret = rt_task_create(task, name, stksize, prio, mode);
	if (ret)
		return ret;

	return rt_task_start(task, entry, arg);
}

/**
 * @fn int rt_task_same(RT_TASK *task1, RT_TASK *task2)
 * @brief Compare real-time task descriptors.
 *
 * This predicate returns true if @a task1 and @a task2 refer to the
 * same task.
 *
 * @param task1 First task descriptor to compare.
 *
 * @param task2 Second task descriptor to compare.
 *
 * @return A non-zero value is returned if both descriptors refer to
 * the same task, zero otherwise.
 *
 * @apitags{unrestricted}
 */
int rt_task_same(RT_TASK *task1, RT_TASK *task2)
{
	/* Two descriptors denote the same task when they carry the
	 * same registry handle. Both arguments must be valid,
	 * non-NULL descriptors. */
	return task1->handle == task2->handle;
}

1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115
/**
 * @fn int rt_task_suspend(RT_TASK *task)
 * @brief Suspend a real-time task.
 *
 * Forcibly suspend the execution of a task. This task will not be
 * eligible for scheduling until it is explicitly resumed by a call to
 * rt_task_resume(). In other words, the suspended state caused by a
 * call to rt_task_suspend() is cumulative with respect to the delayed
 * and blocked states caused by other services, and is managed
 * separately from them.
 *
 * A nesting count is maintained so that rt_task_suspend() and
 * rt_task_resume() must be used in pairs.
 *
 * Receiving a Linux signal causes the suspended task to resume
 * immediately.
 *
1116 1117
 * @param task The task descriptor. If @a task is NULL, the current
 * task is suspended.
1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a task is NULL but the caller is not a
 * Xenomai task, or if @a task is non-NULL but not a valid task
 * descriptor.
 *
 * - -EINTR is returned if a Linux signal has been received by the
 * caller if suspended.
 *
 * - -EPERM is returned if @a task is NULL and this service was called
 * from an invalid context.
 *
1131
 * @apitags{mode-unrestricted, switch-primary}
1132
 *
1133
 * @note The caller must be an Alchemy task if @a task is NULL.
1134 1135 1136 1137 1138 1139 1140
 *
 * @note Blocked and suspended task states are cumulative. Therefore,
 * suspending a task currently waiting on a synchronization object
 * (e.g. semaphore, queue) holds its execution until it is resumed,
 * despite the awaited resource may have been acquired, or a timeout
 * has elapsed in the meantime.
 */
Philippe Gerum's avatar
Philippe Gerum committed
1141 1142 1143 1144 1145 1146
int rt_task_suspend(RT_TASK *task)
{
	struct alchemy_task *tcb;
	struct service svc;
	int ret;

1147
	CANCEL_DEFER(svc);
1148

Philippe Gerum's avatar
Philippe Gerum committed
1149 1150
	tcb = get_alchemy_task_or_self(task, &ret);
	if (tcb == NULL)
1151
		goto out;
Philippe Gerum's avatar
Philippe Gerum committed
1152

1153 1154 1155
	if (tcb->suspends++ == 0)
		ret = threadobj_suspend(&tcb->thobj);

Philippe Gerum's avatar
Philippe Gerum committed
1156
	put_alchemy_task(tcb);
1157
out:
1158
	CANCEL_RESTORE(svc);
Philippe Gerum's avatar
Philippe Gerum committed
1159 1160 1161 1162

	return ret;
}

/**
 * @fn int rt_task_resume(RT_TASK *task)
 * @brief Resume a real-time task.
 *
 * Forcibly resume the execution of a task which was previously
 * suspended by a call to rt_task_suspend(), if the suspend nesting
 * count decrements to zero.
 *
 * @param task The task descriptor.
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a task is not a valid task descriptor.
 *
 * @apitags{unrestricted, switch-primary}
 *
 * @note Blocked and suspended task states are cumulative. Therefore,
 * resuming a task currently waiting on a synchronization object
 * (e.g. semaphore, queue) does not make it eligible for scheduling
 * until the awaited resource is eventually acquired, or a timeout
 * elapses.
 */
int rt_task_resume(RT_TASK *task)
{
	struct alchemy_task *tcb;
	struct service svc;
	int ret = 0;	/* Resuming a non-suspended task succeeds as a no-op. */

	CANCEL_DEFER(svc);

	tcb = get_alchemy_task(task, &ret);
	if (tcb == NULL)
		goto out;

	/*
	 * Suspend/resume calls nest: only the transition of the
	 * nesting count from 1 to 0 actually resumes the thread.
	 */
	if (tcb->suspends > 0 && --tcb->suspends == 0)
		ret = threadobj_resume(&tcb->thobj);

	put_alchemy_task(tcb);
out:
	CANCEL_RESTORE(svc);

	return ret;
}

1207 1208 1209 1210 1211 1212 1213
/**
 * @fn RT_TASK *rt_task_self(void)
 * @brief Retrieve the current task descriptor.
 *
 * Return the address of the current Alchemy task descriptor.
 *
 * @return The address of the task descriptor referring to the current
1214 1215
 * Alchemy task is returned upon success, or NULL if not called from a
 * valid Alchemy task context.
1216
 *
1217
 * @apitags{xthread-only}
1218
 */
Philippe Gerum's avatar
Philippe Gerum committed
1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229
RT_TASK *rt_task_self(void)
{
	struct alchemy_task *tcb;

	tcb = alchemy_task_current();
	if (tcb == NULL)
		return NULL;

	return &tcb->self;
}

/**
 * @fn int rt_task_set_priority(RT_TASK *task, int prio)
 * @brief Change the base priority of a real-time task.
 *
 * The base priority of a task defines the relative importance of the
 * work being done by each task, which gains conrol of the CPU
 * accordingly.
 *
 * Changing the base priority of a task does not affect the priority
 * boost the target task might have obtained as a consequence of a
 * priority inheritance undergoing.
 *
 * @param task The task descriptor. If @a task is NULL, the priority
 * of the current task is changed.
 *
 * @param prio The new priority. This value must range from [T_LOPRIO
 * .. T_HIPRIO] (inclusive) where T_LOPRIO is the lowest effective
 * priority.
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a task is not a valid task descriptor, or
 * if @a prio is invalid.
 *
 * - -EPERM is returned if @a task is NULL and this service was called
 * from an invalid context.
 *
 * @apitags{mode-unrestricted, switch-primary, switch-secondary}
 *
 * @note The caller must be an Alchemy task if @a task is NULL.
 *
 * @note Assigning the same priority to a running or ready task moves
 * it to the end of its priority group, thus causing a manual
 * round-robin.
 */
int rt_task_set_priority(RT_TASK *task, int prio)
{
	struct sched_param_ex param_ex;
	struct alchemy_task *tcb;
	struct service svc;
	int policy, ret;

	/* Reject out-of-range priorities before touching the task. */
	ret = check_task_priority(prio);
	if (ret)
		return ret;

	CANCEL_DEFER(svc);

	tcb = get_alchemy_task_or_self(task, &ret);
	if (tcb == NULL)
		goto out;

	/* Priority zero maps to the non real-time scheduling class. */
	policy = prio ? SCHED_FIFO : SCHED_OTHER;
	param_ex.sched_priority = prio;
	ret = threadobj_set_schedparam(&tcb->thobj, policy, &param_ex);
	switch (ret) {
	case -EIDRM:
		/*
		 * NOTE(review): -EIDRM presumably means the target
		 * task vanished while updating its parameters, and
		 * the reference appears to be dropped in that path —
		 * which is why put_alchemy_task() is skipped here and
		 * the call is reported as a success. Confirm against
		 * the threadobj_set_schedparam() implementation.
		 */
		ret = 0;
		break;
	default:
		put_alchemy_task(tcb);
	}
out:
	CANCEL_RESTORE(svc);

	return ret;
}
1297

/**
 * @fn int rt_task_yield(void)
 * @brief Manual round-robin.
 *
 * Move the current task to the end of its priority group, so that the
 * next equal-priority task in ready state is switched in.
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EPERM is returned if this service was called from an invalid
 * context.
 *
 * @apitags{xthread-only, switch-primary}
 */
int rt_task_yield(void)
{
	/* Only a Xenomai thread may relinquish the CPU this way. */
	if (threadobj_current_p()) {
		threadobj_yield();
		return 0;
	}

	return -EPERM;
}

1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334
/**
 * @fn int rt_task_unblock(RT_TASK *task)
 * @brief Unblock a real-time task.
 *
 * Break the task out of any wait it is currently in.  This call
 * clears all delay and/or resource wait condition for the target
 * task.
 *
 * However, rt_task_unblock() does not resume a task which has been
 * forcibly suspended by a previous call to rt_task_suspend().  If all
 * suspensive conditions are gone, the task becomes eligible anew for
 * scheduling.
 *
1335
 * @param task The task descriptor.
1336 1337 1338 1339 1340
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a task is not a valid task descriptor.
 *
1341
 * @apitags{unrestricted, switch-primary}
1342
 */
1343 1344 1345 1346 1347 1348
int rt_task_unblock(RT_TASK *task)
{
	struct alchemy_task *tcb;
	struct service svc;
	int ret;

1349
	CANCEL_DEFER(svc);
1350

1351 1352
	tcb = get_alchemy_task(task, &ret);
	if (tcb == NULL)
1353
		goto out;
1354 1355 1356

	ret = threadobj_unblock(&tcb->thobj);
	put_alchemy_task(tcb);