Commit 9027ec94 authored by Philippe Gerum's avatar Philippe Gerum

cobalt/sched: introduce SCHED_QUOTA policy

The SCHED_QUOTA policy enforces a limitation on the CPU consumption of
threads over a globally defined period, known as the quota
interval. This is done by pooling threads with common requirements in
groups, and giving each group a share of the global period
(CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD).

When threads have entirely consumed the quota allotted to the group
they belong to, the latter is suspended as a whole, until the next
quota interval starts. At this point, a new runtime budget is given to
each group, in accordance with its share.
parent fe84ba35
......@@ -18,6 +18,7 @@ noinst_HEADERS = \
sched-idle.h \
schedparam.h \
schedqueue.h \
sched-quota.h \
sched-rt.h \
sched-sporadic.h \
sched-tp.h \
......
......@@ -322,6 +322,7 @@ noinst_HEADERS = \
sched-idle.h \
schedparam.h \
schedqueue.h \
sched-quota.h \
sched-rt.h \
sched-sporadic.h \
sched-tp.h \
......
/**
* Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
*
* Xenomai is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License,
* or (at your option) any later version.
*
* Xenomai is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Xenomai; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*/
#ifndef _COBALT_KERNEL_SCHED_QUOTA_H
#define _COBALT_KERNEL_SCHED_QUOTA_H
#ifndef _COBALT_KERNEL_SCHED_H
#error "please don't include cobalt/kernel/sched-quota.h directly"
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
extern struct xnsched_class xnsched_class_quota;
/*
 * Per-scheduler state of a SCHED_QUOTA thread group. Threads with
 * common requirements are pooled in such a group, which is given a
 * share of the global quota interval
 * (CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD). When the group has consumed
 * its allotted quota, it is suspended as a whole until the next
 * interval begins.
 */
struct xnsched_quota_group {
	struct xnsched *sched;		/* Scheduler this group is attached to. */
	xnticks_t quota_ns;		/* Runtime quota allotted per interval (ns). */
	xnticks_t quota_peak_ns;	/* Peak quota (ns) — presumably an upper bound when credit is available; confirm in sched-quota.c. */
	xnticks_t run_start_ns;		/* NOTE(review): likely timestamp when the group last started running — confirm. */
	xnticks_t run_budget_ns;	/* Remaining runtime budget for the current interval (ns). */
	xnticks_t run_credit_ns;	/* NOTE(review): presumably extra credit toward the peak quota — confirm. */
	struct list_head expired;	/* Member threads parked on budget exhaustion (linked via thread->quota_expired). */
	struct list_head next;		/* Link in the per-scheduler group list (xnsched_quota.groups). */
	int nr_active;			/* NOTE(review): presumably count of runnable member threads — confirm. */
	int nr_threads;			/* Total number of member threads. */
	int tgid;			/* Unique thread group id. */
};
/*
 * Per-scheduler (per-CPU) context of the SCHED_QUOTA class,
 * embedded in struct xnsched under CONFIG_XENO_OPT_SCHED_QUOTA.
 */
struct xnsched_quota {
	xnticks_t period_ns;		/* Length of the global quota interval (ns). */
	struct xntimer refill_timer;	/* Presumably fires at each interval boundary to replenish group budgets — confirm in sched-quota.c. */
	struct xntimer limit_timer;	/* Presumably fires when the running group exhausts its budget — confirm in sched-quota.c. */
	xnsched_queue_t runnable;	/* Runnable threads undergoing SCHED_QUOTA scheduling. */
	struct list_head groups;	/* All groups on this scheduler (xnsched_quota_group.next links). */
};
/*
 * Reset the SCHED_QUOTA per-thread state at thread init time.
 *
 * The thread starts detached from any group, with an empty expiry
 * link. Always succeeds (returns 0), matching the signature of the
 * other xnsched_*_init_thread() hooks called from
 * xnsched_init_thread().
 */
static inline int xnsched_quota_init_thread(struct xnthread *thread)
{
	INIT_LIST_HEAD(&thread->quota_expired);
	thread->quota = NULL;	/* No group attached yet. */

	return 0;
}
/* Create a new thread group on @sched; presumably returns 0 on success
 * and assigns @tg->tgid — confirm in sched-quota.c. */
int xnsched_quota_create_group(struct xnsched_quota_group *tg,
			       struct xnsched *sched);

/* Dismantle @tg; presumably fails if member threads remain — confirm. */
int xnsched_quota_destroy_group(struct xnsched_quota_group *tg);

/* Set the group's share of the global period, as regular and peak
 * percentages of it. */
void xnsched_quota_set_limit(struct xnsched_quota_group *tg,
			     int quota_percent, int quota_peak_percent);

/* Look up a group by id on @sched; presumably NULL when not found —
 * confirm in sched-quota.c. */
struct xnsched_quota_group *
xnsched_quota_find_group(struct xnsched *sched, int tgid);
#endif /* !CONFIG_XENO_OPT_SCHED_QUOTA */
#endif /* !_COBALT_KERNEL_SCHED_QUOTA_H */
......@@ -34,6 +34,7 @@
#include <cobalt/kernel/sched-tp.h>
#include <cobalt/kernel/sched-weak.h>
#include <cobalt/kernel/sched-sporadic.h>
#include <cobalt/kernel/sched-quota.h>
#include <cobalt/kernel/vfile.h>
#include <cobalt/kernel/assert.h>
#include <asm/xenomai/machine.h>
......@@ -83,6 +84,10 @@ struct xnsched {
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
/*!< Context of sporadic scheduling class. */
struct xnsched_sporadic pss;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
/*!< Context of runtime quota scheduling. */
struct xnsched_quota quota;
#endif
/*!< Interrupt nesting level. */
volatile unsigned inesting;
......@@ -487,6 +492,7 @@ static inline int xnsched_init_thread(struct xnthread *thread)
xnsched_idle_init_thread(thread);
xnsched_rt_init_thread(thread);
#ifdef CONFIG_XENO_OPT_SCHED_TP
ret = xnsched_tp_init_thread(thread);
if (ret)
......@@ -497,6 +503,12 @@ static inline int xnsched_init_thread(struct xnthread *thread)
if (ret)
return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
ret = xnsched_quota_init_thread(thread);
if (ret)
return ret;
#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */
return ret;
}
......
......@@ -46,6 +46,11 @@ struct xnsched_sporadic_param {
int current_prio;
};
struct xnsched_quota_param {
int prio;
int tgid; /* thread group id. */
};
union xnsched_policy_param {
struct xnsched_idle_param idle;
struct xnsched_rt_param rt;
......@@ -58,6 +63,9 @@ union xnsched_policy_param {
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
struct xnsched_sporadic_param pss;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
struct xnsched_quota_param quota;
#endif
};
#endif /* !_COBALT_KERNEL_SCHEDPARAM_H */
......@@ -68,6 +68,13 @@ static inline int xnsched_emptyq_p(struct xnsched_mlq *q)
return q->himap == 0;
}
/*
 * Compute the weight (priority-queue index) of the first populated
 * slot in the two-level bitmap: ffnz() locates the first set bit in
 * the top-level map, then in the matching low-level word. This is
 * the scalable counterpart of the list-based xnsched_weightq()
 * macro used when CONFIG_XENO_OPT_SCALABLE_SCHED is off.
 */
static inline int xnsched_weightq(struct xnsched_mlq *q)
{
	int hi = ffnz(q->himap);
	int lo = ffnz(q->lomap[hi]);
	return hi * BITS_PER_LONG + lo; /* Result is undefined if none set. */
}
typedef struct xnsched_mlq xnsched_queue_t;
#else /* ! CONFIG_XENO_OPT_SCALABLE_SCHED */
......@@ -86,6 +93,13 @@ typedef struct list_head xnsched_queue_t;
__t = list_get_entry(__q, struct xnthread, rlink); \
__t; \
})
/*
 * Non-scalable counterpart of xnsched_weightq(): the weight is the
 * current priority (cprio) of the thread at the head of the linear
 * queue, which is peeked at without being dequeued. As with the
 * bitmap version, the result is undefined on an empty queue
 * (list_first_entry() on an empty list).
 */
#define xnsched_weightq(__q) \
	({ \
		struct xnthread *__t; \
		__t = list_first_entry(__q, struct xnthread, rlink); \
		__t->cprio; \
	})
#endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
......
......@@ -62,7 +62,7 @@ struct xnthread_wait_context {
typedef struct xnthread {
struct xnarchtcb tcb; /* Architecture-dependent block -- Must be first */
struct xnarchtcb tcb; /* Architecture-dependent block */
unsigned long state; /* Thread state flags */
......@@ -81,6 +81,10 @@ typedef struct xnthread {
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
struct xnsched_sporadic_data *pss; /* Sporadic scheduling data. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
struct xnsched_quota_group *quota; /* Quota scheduling group. */
struct list_head quota_expired;
#endif
unsigned int idtag; /* Unique ID tag */
......
......@@ -51,15 +51,6 @@ struct __sched_tp_param {
int __sched_partition;
};
struct sched_param_ex {
int sched_priority;
union {
struct __sched_ss_param ss;
struct __sched_rr_param rr;
struct __sched_tp_param tp;
} sched_u;
};
struct sched_tp_window {
struct timespec offset;
struct timespec duration;
......@@ -71,8 +62,49 @@ struct __sched_config_tp {
struct sched_tp_window windows[0];
};
#ifndef SCHED_QUOTA
/* Policy identifier for SCHED_QUOTA (not provided by libc). */
#define SCHED_QUOTA		12
/* Convenience accessor for the group id inside struct sched_param_ex. */
#define sched_quota_group	sched_u.quota.__sched_group
#endif /* !SCHED_QUOTA */

/* SCHED_QUOTA scheduling parameters (sched_param_ex.sched_u.quota). */
struct __sched_quota_param {
	int __sched_group;	/* Id of the thread group the thread belongs to. */
};

/* Operation codes for __sched_config_quota.op. */
enum {
	sched_quota_add,	/* Create a group (result in .add). */
	sched_quota_remove,	/* Destroy a group (.remove.tgid). */
	sched_quota_set		/* Update a group's quota settings (.set). */
};
/*
 * SCHED_QUOTA configuration request; @op selects which of the
 * following sub-structures is meaningful.
 */
struct __sched_config_quota {
	int op;			/* One of sched_quota_add/remove/set. */
	struct {
		int *tgid_r;	/* [out] Receives the id of the new group. */
	} add;
	struct {
		int tgid;	/* Group to destroy. */
	} remove;
	struct {
		int tgid;	/* Group to update. */
		int quota;	/* New quota — presumably a percentage of the global period (see xnsched_quota_set_limit); confirm. */
		int quota_peak;	/* New peak quota — presumably a percentage as well; confirm. */
	} set;
};
/*
 * Extended scheduling parameters: base priority plus the
 * policy-specific settings for the Cobalt scheduling classes.
 */
struct sched_param_ex {
	int sched_priority;
	union {
		struct __sched_ss_param ss;	/* SCHED_SPORADIC */
		struct __sched_rr_param rr;	/* SCHED_RR */
		struct __sched_tp_param tp;	/* SCHED_TP */
		struct __sched_quota_param quota; /* SCHED_QUOTA */
	} sched_u;
};

/* Policy-specific bulk configuration data. */
union sched_config {
	struct __sched_config_tp tp;
	struct __sched_config_quota quota;
};
#endif /* !_COBALT_UAPI_SCHED_H */
......@@ -90,9 +90,12 @@ config XENO_OPT_SCHED_SPORADIC
depends on XENO_OPT_SCHED_CLASSES
help
This option enables support for the sporadic scheduling. It
can be used to enforce a capped limit on the execution time of
a thread within a given period of time.
This option enables support for the sporadic scheduling policy
in Xenomai (SCHED_SPORADIC), also known as POSIX sporadic
server.
It can be used to enforce a capped limit on the execution time
of a thread within a given period of time.
If in doubt, say N.
......@@ -109,6 +112,48 @@ config XENO_OPT_SCHED_SPORADIC_MAXREPL
be pending concurrently for any given thread that undergoes
sporadic scheduling (system minimum is 4).
config XENO_OPT_SCHED_QUOTA
bool "Thread groups with runtime quota"
default n
depends on XENO_OPT_SCHED_CLASSES
help
This option enables the SCHED_QUOTA scheduling policy for
Xenomai.
This policy enforces a limitation on the CPU consumption of
threads over a globally defined period, known as the quota
interval. This is done by pooling threads with common
requirements in groups, and giving each group a share of the
global period (see CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD).
When threads have entirely consumed the quota allotted to the
group they belong to, the latter is suspended as a whole,
until the next quota interval starts. At this point, a new
runtime budget is given to each group, in accordance with its
share.
If in doubt, say N.
config XENO_OPT_SCHED_QUOTA_PERIOD
int "Quota interval (us)"
default 10000
range 100 1000000000
depends on XENO_OPT_SCHED_QUOTA
help
Duration, in microseconds, of the global period which thread groups are given a share of.
config XENO_OPT_SCHED_QUOTA_NR_GROUPS
int "Number of thread groups"
default 32
range 1 1024
depends on XENO_OPT_SCHED_QUOTA
help
The overall number of thread groups which may be defined
across all CPUs.
config XENO_OPT_STATS
bool "Runtime statistics"
depends on XENO_OPT_VFILE
......
......@@ -19,6 +19,7 @@ xenomai-y := apc.o \
thread.o \
timer.o
xenomai-$(CONFIG_XENO_OPT_SCHED_QUOTA) += sched-quota.o
xenomai-$(CONFIG_XENO_OPT_SCHED_WEAK) += sched-weak.o
xenomai-$(CONFIG_XENO_OPT_SCHED_SPORADIC) += sched-sporadic.o
xenomai-$(CONFIG_XENO_OPT_SCHED_TP) += sched-tp.o
......
This diff is collapsed.
......@@ -44,13 +44,6 @@
struct cobalt_thread;
struct cobalt_threadstat;
struct cobalt_threadattr {
int policy;
struct sched_param_ex schedparam_ex;
const char *name;
cpumask_t affinity;
};
/*
* pthread_mutexattr_t and pthread_condattr_t fit on 32 bits, for
* compatibility with libc.
......@@ -101,9 +94,6 @@ struct cobalt_thread {
struct list_head link;
struct list_head *container;
/** Creation attributes. */
struct cobalt_threadattr attr;
/** Signal management. */
sigset_t sigpending;
struct list_head sigqueues[_NSIG]; /* cobalt_sigpending */
......
This diff is collapsed.
......@@ -71,6 +71,9 @@ void xnsched_register_classes(void)
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
xnsched_register_class(&xnsched_class_sporadic);
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
xnsched_register_class(&xnsched_class_quota);
#endif
xnsched_register_class(&xnsched_class_rt);
}
......@@ -579,13 +582,6 @@ void xnsched_delq(struct xnsched_mlq *q, struct xnthread *thread)
del_q(q, &thread->rlink, get_qindex(q, thread->cprio));
}
static inline int ffs_q(struct xnsched_mlq *q)
{
int hi = ffnz(q->himap);
int lo = ffnz(q->lomap[hi]);
return hi * BITS_PER_LONG + lo; /* Result is undefined if none set. */
}
struct xnthread *xnsched_getq(struct xnsched_mlq *q)
{
struct xnthread *thread;
......@@ -595,7 +591,7 @@ struct xnthread *xnsched_getq(struct xnsched_mlq *q)
if (q->elems == 0)
return NULL;
idx = ffs_q(q);
idx = xnsched_weightq(q);
head = q->heads + idx;
XENO_BUGON(NUCLEUS, list_empty(head));
thread = list_first_entry(head, struct xnthread, rlink);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment