Commit d9ff7382 authored by Philippe Gerum, committed by Jan Kiszka

lib: remove remaining likely/unlikely branch annotations

In user-space at least, we'd be better off trusting the CPU's branch
predictor, instead of relying on our limited perception when it comes
to determining the likeliness of a condition, or every compiler to do
the right thing with respect to efficient branching.

We only have a few unlikely predictions in-tree on straightforward
conditions, which we can remove safely:

- POSIX condvars wait/signal loops on x86, arm and arm64 showed no
  observable performance penalty.

- other callers on the thread cancellation path, as well as the debug
  instrumentation, are slow paths in essence anyway.
Signed-off-by: Philippe Gerum <rpm@xenomai.org>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
parent db23c690
......@@ -76,7 +76,7 @@ void debug_init(void);
#define __bt(__exp) \
({ \
typeof(__exp) __ret = (__exp); \
if (unlikely(__ret < 0)) \
if (__ret < 0) \
backtrace_log((int)__ret, __FUNCTION__, \
__FILE__, __LINE__); \
__ret; \
......@@ -85,7 +85,7 @@ void debug_init(void);
#define __bterrno(__exp) \
({ \
typeof(__exp) __ret = (__exp); \
if (unlikely(__ret < 0)) \
if (__ret < 0) \
backtrace_log(-errno, __FUNCTION__, \
__FILE__, __LINE__); \
__ret; \
......
......@@ -167,7 +167,7 @@ static int __attribute__((cold))
static inline int cobalt_cond_autoinit(union cobalt_cond_union *ucond)
{
if (unlikely(ucond->shadow_cond.magic != COBALT_COND_MAGIC))
if (ucond->shadow_cond.magic != COBALT_COND_MAGIC)
return cobalt_cond_doautoinit(ucond);
return 0;
}
......@@ -201,7 +201,7 @@ COBALT_IMPL(int, pthread_cond_destroy, (pthread_cond_t *cond))
struct cobalt_cond_shadow *_cond =
&((union cobalt_cond_union *)cond)->shadow_cond;
if (unlikely(_cond->magic != COBALT_COND_MAGIC))
if (_cond->magic != COBALT_COND_MAGIC)
return (cobalt_cond_autoinit_type(cond) < 0) ? EINVAL : 0;
return -XENOMAI_SYSCALL1( sc_cobalt_cond_destroy, _cond);
......
......@@ -240,7 +240,7 @@ static int __attribute__((cold))
static inline int cobalt_mutex_autoinit(union cobalt_mutex_union *umutex)
{
if (unlikely(umutex->shadow_mutex.magic != COBALT_MUTEX_MAGIC))
if (umutex->shadow_mutex.magic != COBALT_MUTEX_MAGIC)
return cobalt_mutex_doautoinit(umutex);
return 0;
}
......@@ -273,7 +273,7 @@ COBALT_IMPL(int, pthread_mutex_destroy, (pthread_mutex_t *mutex))
&((union cobalt_mutex_union *)mutex)->shadow_mutex;
int err;
if (unlikely(_mutex->magic != COBALT_MUTEX_MAGIC))
if (_mutex->magic != COBALT_MUTEX_MAGIC)
return (cobalt_mutex_autoinit_type(mutex) < 0) ? EINVAL : 0;
err = XENOMAI_SYSCALL1(sc_cobalt_mutex_destroy, _mutex);
......
......@@ -1022,7 +1022,7 @@ static int request_setschedparam(struct threadobj *thobj, int policy,
#ifdef CONFIG_XENO_PSHARED
struct remote_request *rq;
if (unlikely(!threadobj_local_p(thobj))) {
if (!threadobj_local_p(thobj)) {
rq = xnmalloc(sizeof(*rq));
if (rq == NULL)
return -ENOMEM;
......@@ -1065,7 +1065,7 @@ static int request_cancel(struct threadobj *thobj) /* thobj->lock held, dropped.
struct remote_request *rq;
int ret;
if (unlikely(!threadobj_local_p(thobj))) {
if (!threadobj_local_p(thobj)) {
threadobj_unlock(thobj);
rq = xnmalloc(sizeof(*rq));
if (rq == NULL)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment