Commit 08af0a34 authored by Philippe Gerum's avatar Philippe Gerum

cobalt/sched: add start, stop SCHED_TP config operations

We may want to start/stop a TP schedule without removing it from the
CPU entirely. To this end, this commit introduces the
sched_tp_start/stop requests, processed by sched_config_np().
parent 7c4492ac
......@@ -61,6 +61,7 @@ struct compat_sched_tp_window {
};
/*
 * 32-bit (compat) layout of the SCHED_TP configuration block, mirroring
 * struct __sched_config_tp for 32-bit callers on a 64-bit kernel.
 */
struct __compat_sched_config_tp {
int op;			/* Operation requested (sched_tp_* enum value). */
int nr_windows;		/* Number of entries in windows[]. */
/* Trailing variable-length array of time windows (GCC zero-length
   array idiom; length given by nr_windows). */
struct compat_sched_tp_window windows[0];
};
......
......@@ -57,7 +57,15 @@ struct sched_tp_window {
int ptid;
};
/*
 * SCHED_TP configuration operations, passed in the op field of
 * struct __sched_config_tp and processed by set_tp_config().
 */
enum {
sched_tp_install,	/* Install a new TP schedule from windows[]. */
sched_tp_uninstall,	/* Remove the current TP schedule from the CPU. */
sched_tp_start,		/* Start the global time frame (enable scheduling). */
sched_tp_stop,		/* Stop temporal scheduling; schedule stays installed. */
};
/*
 * SCHED_TP configuration block: selects an operation to apply to a
 * CPU's temporal-partitioning schedule, optionally followed by the
 * set of time windows defining that schedule.
 */
struct __sched_config_tp {
int op;			/* Operation requested (sched_tp_* enum value). */
int nr_windows;		/* Number of entries in windows[]; only
			   meaningful for sched_tp_install. */
/* Trailing variable-length array of time windows (GCC zero-length
   array idiom; length given by nr_windows). */
struct sched_tp_window windows[0];
};
......
......@@ -242,14 +242,35 @@ int set_tp_config(int cpu, union sched_config *config, size_t len)
if (len < sizeof(config->tp))
return -EINVAL;
if (config->tp.nr_windows == 0) {
sched = xnsched_struct(cpu);
switch (config->tp.op) {
case sched_tp_install:
if (config->tp.nr_windows > 0)
break;
/* Fallback wanted. */
case sched_tp_uninstall:
gps = NULL;
goto set_schedule;
case sched_tp_start:
xnlock_get_irqsave(&nklock, s);
xnsched_tp_start_schedule(sched);
xnlock_put_irqrestore(&nklock, s);
return 0;
case sched_tp_stop:
xnlock_get_irqsave(&nklock, s);
xnsched_tp_stop_schedule(sched);
xnlock_put_irqrestore(&nklock, s);
return 0;
default:
return -EINVAL;
}
/* Install a new TP schedule on CPU. */
gps = xnmalloc(sizeof(*gps) + config->tp.nr_windows * sizeof(*w));
if (gps == NULL)
goto fail;
return -ENOMEM;
for (n = 0, p = config->tp.windows, w = gps->pwins, next_offset = 0;
n < config->tp.nr_windows; n++, p++, w++) {
......@@ -279,10 +300,8 @@ int set_tp_config(int cpu, union sched_config *config, size_t len)
gps->pwin_nr = n;
gps->tf_duration = next_offset;
set_schedule:
sched = xnsched_struct(cpu);
xnlock_get_irqsave(&nklock, s);
ogps = xnsched_tp_set_schedule(sched, gps);
xnsched_tp_start_schedule(sched);
xnlock_put_irqrestore(&nklock, s);
if (ogps)
......@@ -292,7 +311,7 @@ set_schedule:
cleanup_and_fail:
xnfree(gps);
fail:
return -EINVAL;
}
......@@ -333,6 +352,7 @@ ssize_t get_tp_config(int cpu, void __user *u_config, size_t len,
goto out;
}
config->tp.op = sched_tp_install;
config->tp.nr_windows = gps->pwin_nr;
for (n = 0, pp = p = config->tp.windows, pw = w = gps->pwins;
n < gps->pwin_nr; pp = p, p++, pw = w, w++, n++) {
......
......@@ -341,6 +341,7 @@ sys32_fetch_config(int policy, const void __user *u_config, size_t *len)
if (policy == SCHED_QUOTA)
memcpy(&buf->quota, &cbuf->quota, sizeof(cbuf->quota));
else {
buf->tp.op = cbuf->tp.op;
buf->tp.nr_windows = cbuf->tp.nr_windows;
for (n = 0; n < buf->tp.nr_windows; n++) {
buf->tp.windows[n].ptid = cbuf->tp.windows[n].ptid;
......@@ -387,9 +388,12 @@ static ssize_t sys32_put_config(int policy,
sizeof(u_p->quota.info);
}
/* SCHED_TP */
if (u_len < compat_sched_tp_confsz(config->tp.nr_windows))
return -ENOSPC;
__xn_put_user(config->tp.op, &u_p->tp.op);
__xn_put_user(config->tp.nr_windows, &u_p->tp.nr_windows);
for (n = 0, ret = 0; n < config->tp.nr_windows; n++) {
......
......@@ -221,16 +221,21 @@ void xnsched_tp_start_schedule(struct xnsched *sched)
{
struct xnsched_tp *tp = &sched->tp;
if (tp->gps == NULL)
return;
tp->wnext = 0;
tp->tf_start = xnclock_read_monotonic(&nkclock);
tp_schedule_next(&sched->tp);
tp_schedule_next(tp);
}
EXPORT_SYMBOL_GPL(xnsched_tp_start_schedule);
void xnsched_tp_stop_schedule(struct xnsched *sched)
{
struct xnsched_tp *tp = &sched->tp;
xntimer_stop(&tp->tf_timer);
if (tp->gps)
xntimer_stop(&tp->tf_timer);
}
EXPORT_SYMBOL_GPL(xnsched_tp_stop_schedule);
......
......@@ -979,32 +979,64 @@ COBALT_IMPL(int, pthread_yield, (void))
*
* @par Settings applicable to SCHED_TP
*
* This call installs the temporal partitions for @a cpu.
*
* - config.tp.windows should be a non-null set of time windows,
* defining the scheduling time slots for @a cpu. Each window defines
* its offset from the start of the global time frame
* (windows[].offset), a duration (windows[].duration), and the
* partition id it applies to (windows[].ptid).
*
* Time windows must be strictly contiguous, i.e. windows[n].offset +
* windows[n].duration shall equal windows[n + 1].offset.
* If windows[].ptid is in the range
* This call controls the temporal partitions for @a cpu, depending on
* the operation requested.
*
* - config.tp.op specifies the operation to perform:
*
* - @a sched_tp_install installs a new TP schedule on @a cpu, defined
* by config.tp.windows[]. The global time frame is not activated
* upon return from this request yet; @a sched_tp_start must be
 * issued to activate the temporal scheduling on @a cpu.
*
* - @a sched_tp_uninstall removes the current TP schedule from @a
* cpu, releasing all the attached resources. If no TP schedule
 * exists on @a cpu, this request has no effect.
*
* - @a sched_tp_start enables the temporal scheduling on @a cpu,
* starting the global time frame. If no TP schedule exists on @a cpu,
* this action has no effect.
*
* - @a sched_tp_stop disables the temporal scheduling on @a cpu. The
* current TP schedule is not uninstalled though, and may be
* re-started later by a @a sched_tp_start request.
* @caution As a consequence of this request, threads assigned to the
* un-scheduled partitions may be starved from CPU time.
*
* - for a @a sched_tp_install operation, config.tp.nr_windows
* indicates the number of elements present in the config.tp.windows[]
* array. If config.tp.nr_windows is zero, the action taken is
* identical to @a sched_tp_uninstall.
*
 * - if config.tp.nr_windows is non-zero, config.tp.windows[] is a set of
* scheduling time slots for threads assigned to @a cpu. Each window
* is specified by its offset from the start of the global time frame
* (windows[].offset), its duration (windows[].duration), and the
* partition id it should activate during such period of time
* (windows[].ptid). This field is not considered for other requests
* than @a sched_tp_install.
*
* Time slots must be strictly contiguous, i.e. windows[n].offset +
* windows[n].duration shall equal windows[n + 1].offset. If
* windows[].ptid is in the range
* [0..CONFIG_XENO_OPT_SCHED_TP_NRPART-1], SCHED_TP threads which
* belong to the partition being referred to may run for the duration
* of the time window.
*
* Time holes may be defined using windows assigned to the pseudo
* partition #-1, during which no SCHED_TP threads may be scheduled.
* belong to the partition being referred to may be given CPU time on
* @a cpu, from time windows[].offset to windows[].offset +
* windows[].duration, provided those threads are in a runnable state.
*
* - config.tp.nr_windows should define the number of elements present
* in the config.tp.windows[] array.
* Time holes between valid time slots may be defined using windows
 * activating the pseudo partition -1. When such a window is active in
* the global time frame, no CPU time is available to SCHED_TP threads
* on @a cpu.
*
* @a info is ignored for this request.
* @note The sched_tp_confsz(nr_windows) macro returns the length of
* config.tp depending on the number of time slots to be defined in
* config.tp.windows[], as specified by config.tp.nr_windows.
*
* @par Settings applicable to SCHED_QUOTA
*
* This call manages thread groups running on @a cpu.
* This call manages thread groups running on @a cpu, defining
* per-group quota for limiting their CPU consumption.
*
* - config.quota.op should define the operation to be carried
* out. Valid operations are:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment