Commits (4)
@@ -449,6 +449,17 @@ out:
}
}
static inline bool cobalt_owns_irq(int irq)
{
ipipe_irq_handler_t h;
h = __ipipe_irq_handler(&xnsched_realtime_domain, irq);
return h == xnintr_vec_handler ||
h == xnintr_edge_vec_handler ||
h == xnintr_irq_handler;
}
static inline int xnintr_irq_attach(struct xnintr *intr)
{
struct xnintr_vector *vec = vectors + intr->irq;
@@ -538,9 +549,19 @@ struct xnintr_vector {
static struct xnintr_vector vectors[IPIPE_NR_IRQS];
static inline bool cobalt_owns_irq(int irq)
{
ipipe_irq_handler_t h;
h = __ipipe_irq_handler(&xnsched_realtime_domain, irq);
return h == xnintr_irq_handler;
}
static inline struct xnintr *xnintr_vec_first(unsigned int irq)
{
return __ipipe_irq_cookie(&xnsched_realtime_domain, irq);
return cobalt_owns_irq(irq) ?
__ipipe_irq_cookie(&xnsched_realtime_domain, irq) : NULL;
}
static inline struct xnintr *xnintr_vec_next(struct xnintr *prev)
@@ -1067,6 +1088,7 @@ static inline int format_irq_proc(unsigned int irq,
struct xnvfile_regular_iterator *it)
{
struct xnintr *intr;
struct irq_desc *d;
int cpu;
for_each_realtime_cpu(cpu)
@@ -1100,15 +1122,21 @@ static inline int format_irq_proc(unsigned int irq,
mutex_lock(&intrlock);
intr = xnintr_vec_first(irq);
if (intr) {
xnvfile_puts(it, " ");
do {
xnvfile_putc(it, ' ');
xnvfile_puts(it, intr->name);
intr = xnintr_vec_next(intr);
} while (intr);
if (!cobalt_owns_irq(irq)) {
xnvfile_puts(it, " ");
d = irq_to_desc(irq);
xnvfile_puts(it, d && d->name ? d->name : "-");
} else {
intr = xnintr_vec_first(irq);
if (intr) {
xnvfile_puts(it, " ");
do {
xnvfile_putc(it, ' ');
xnvfile_puts(it, intr->name);
intr = xnintr_vec_next(intr);
} while (intr);
}
}
mutex_unlock(&intrlock);
......
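The format_irq_proc() change above makes /proc/xenomai/irq account for IRQs the real-time domain does not own, printing the regular Linux irq_desc name (or "-") instead of leaving the line bare. Below is a throwaway user-space sketch for eyeballing that output; only the /proc/xenomai/irq path follows from the code above, everything else is illustrative.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Dump /proc/xenomai/irq: per-CPU hit counts followed by the
	 * handler name(s), now the irq_desc name or "-" for IRQs the
	 * Cobalt core does not own. */
	FILE *fp = fopen("/proc/xenomai/irq", "r");
	char line[256];

	if (fp == NULL) {
		perror("fopen(/proc/xenomai/irq)");
		return EXIT_FAILURE;
	}

	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);

	fclose(fp);
	return EXIT_SUCCESS;
}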
@@ -1471,16 +1471,14 @@ int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum)
struct xnsched *sched;
spl_t s;
if (quantum <= xnclock_get_gravity(&nkclock, user))
return -EINVAL;
xnlock_get_irqsave(&nklock, s);
sched = thread->sched;
thread->rrperiod = quantum;
if (quantum != XN_INFINITE) {
if (thread->base_class->sched_tick == NULL) {
if (quantum <= xnclock_get_gravity(&nkclock, user) ||
thread->base_class->sched_tick == NULL) {
xnlock_put_irqrestore(&nklock, s);
return -EINVAL;
}
......
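The xnthread_set_slice() hunk folds the gravity check into the quantum != XN_INFINITE branch. The apparent point: XN_INFINITE is 0 in Cobalt, so the old up-front "quantum <= gravity" test also rejected requests meant to disable round-robin. Here is a self-contained restatement of the accepted/rejected cases; XN_INFINITE == 0 and a non-negative gravity value are spelled out as assumptions rather than taken from the hunk.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t xnticks_t;		/* stand-in for the Cobalt type */
#define XN_INFINITE ((xnticks_t)0)	/* assumed to match the Cobalt definition */

/*
 * Validation rule implemented by the reworked code:
 * - XN_INFINITE turns round-robin off and is always accepted;
 * - a finite quantum must exceed the clock gravity, and the thread's
 *   base scheduling class must provide a sched_tick handler.
 */
static bool slice_request_valid(xnticks_t quantum, xnticks_t gravity,
				bool has_sched_tick)
{
	if (quantum == XN_INFINITE)
		return true;

	return quantum > gravity && has_sched_tick;
}

int main(void)
{
	/* Boundary cases: disabling always works; too-short quanta or
	 * base classes without a tick handler are rejected. */
	return slice_request_valid(XN_INFINITE, 2000, false) &&
	       !slice_request_valid(1000, 2000, true) &&
	       !slice_request_valid(3000, 2000, false) &&
	       slice_request_valid(3000, 2000, true) ? 0 : 1;
}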
@@ -644,7 +644,7 @@ static int autotune_ioctl_nrt(struct rtdm_fd *fd, unsigned int request, void *ar
struct autotune_context *context;
struct autotune_setup setup;
struct gravity_tuner *tuner;
int period, ret;
int ret;
if (request == AUTOTUNE_RTIOC_RESET) {
xnclock_reset_gravity(&nkclock);
@@ -678,10 +678,6 @@ static int autotune_ioctl_nrt(struct rtdm_fd *fd, unsigned int request, void *ar
return -EINVAL;
}
ret = rtdm_safe_copy_from_user(fd, &period, arg, sizeof(period));
if (ret)
return ret;
ret = tuner->init_tuner(tuner);
if (ret)
return ret;
......
@@ -42,10 +42,10 @@ struct bufp_socket {
char label[XNOBJECT_NAME_LEN];
off_t rdoff;
off_t rdrsvd;
off_t wroff;
off_t wrrsvd;
size_t fillsz;
u_long wrtoken;
u_long rdtoken;
rtdm_event_t i_event;
rtdm_event_t o_event;
@@ -115,8 +115,8 @@ static int bufp_socket(struct rtdm_fd *fd)
sk->rdoff = 0;
sk->wroff = 0;
sk->fillsz = 0;
sk->rdtoken = 0;
sk->wrtoken = 0;
sk->rdrsvd = 0;
sk->wrrsvd = 0;
sk->status = 0;
sk->handle = 0;
sk->rx_timeout = RTDM_TIMEOUT_INFINITE;
@@ -162,11 +162,10 @@ static ssize_t __bufp_readbuf(struct bufp_socket *sk,
struct bufp_wait_context wait, *bufwc;
struct rtipc_wait_context *wc;
struct xnthread *waiter;
size_t rbytes, n, avail;
ssize_t len, ret, xret;
rtdm_toseq_t toseq;
ssize_t len, ret;
size_t rbytes, n;
rtdm_lockctx_t s;
u_long rdtoken;
off_t rdoff;
int resched;
@@ -181,18 +180,15 @@ redo:
* We should be able to read a complete message of the
* requested length, or block.
*/
if (sk->fillsz < len)
avail = sk->fillsz - sk->rdrsvd;
if (avail < len)
goto wait;
/*
* Draw the next read token so that we can later
* detect preemption.
*/
rdtoken = ++sk->rdtoken;
/* Read from the buffer in a circular way. */
/* Reserve a read slot into the circular buffer. */
rdoff = sk->rdoff;
rbytes = len;
sk->rdoff = (rdoff + len) % sk->bufsz;
sk->rdrsvd += len;
rbytes = ret = len;
do {
if (rdoff + rbytes > sk->bufsz)
@@ -200,37 +196,30 @@ redo:
else
n = rbytes;
/*
* Release the lock while retrieving the data
* to keep latency low.
* Drop the lock before copying data to
* user. The read slot is consumed in any
* case: the non-copied portion of the message
* is lost on bad write.
*/
cobalt_atomic_leave(s);
ret = xnbufd_copy_from_kmem(bufd, sk->bufmem + rdoff, n);
if (ret < 0)
return ret;
xret = xnbufd_copy_from_kmem(bufd, sk->bufmem + rdoff, n);
cobalt_atomic_enter(s);
/*
* In case we were preempted while retrieving
* the message, we have to re-read the whole
* thing.
*/
if (sk->rdtoken != rdtoken) {
xnbufd_reset(bufd);
goto redo;
if (xret < 0) {
ret = -EFAULT;
break;
}
rdoff = (rdoff + n) % sk->bufsz;
rbytes -= n;
rdoff = (rdoff + n) % sk->bufsz;
} while (rbytes > 0);
sk->fillsz -= len;
sk->rdoff = rdoff;
ret = len;
resched = 0;
if (sk->fillsz + len == sk->bufsz) /* -> writable */
if (sk->fillsz == sk->bufsz) /* -> writable */
resched |= xnselect_signal(&sk->priv->send_block, POLLOUT);
sk->rdrsvd -= len;
sk->fillsz -= len;
if (sk->fillsz == 0) /* -> non-readable */
resched |= xnselect_signal(&sk->priv->recv_block, 0);
@@ -416,11 +405,10 @@ static ssize_t __bufp_writebuf(struct bufp_socket *rsk,
struct bufp_wait_context wait, *bufwc;
struct rtipc_wait_context *wc;
struct xnthread *waiter;
size_t wbytes, n, avail;
ssize_t len, ret, xret;
rtdm_toseq_t toseq;
rtdm_lockctx_t s;
ssize_t len, ret;
size_t wbytes, n;
u_long wrtoken;
off_t wroff;
int resched;
@@ -429,24 +417,21 @@ redo:
rtdm_toseq_init(&toseq, sk->tx_timeout);
cobalt_atomic_enter(s);
redo:
for (;;) {
/*
* We should be able to write the entire message at
* once or block.
* No short or scattered writes: we should write the
* entire message atomically or block.
*/
if (rsk->fillsz + len > rsk->bufsz)
avail = rsk->fillsz + rsk->wrrsvd;
if (avail + len > rsk->bufsz)
goto wait;
/*
* Draw the next write token so that we can later
* detect preemption.
*/
wrtoken = ++rsk->wrtoken;
/* Write to the buffer in a circular way. */
/* Reserve a write slot into the circular buffer. */
wroff = rsk->wroff;
wbytes = len;
rsk->wroff = (wroff + len) % rsk->bufsz;
rsk->wrrsvd += len;
wbytes = ret = len;
do {
if (wroff + wbytes > rsk->bufsz)
@@ -454,33 +439,30 @@ redo:
else
n = wbytes;
/*
* Release the lock while copying the data to
* keep latency low.
* We have to drop the lock while reading in
* data, but we can't rollback on bad read
* from user because some other thread might
* have populated the memory ahead of our
* write slot already: bluntly clear the
* unavailable bytes on copy error.
*/
cobalt_atomic_leave(s);
ret = xnbufd_copy_to_kmem(rsk->bufmem + wroff, bufd, n);
if (ret < 0)
return ret;
xret = xnbufd_copy_to_kmem(rsk->bufmem + wroff, bufd, n);
cobalt_atomic_enter(s);
/*
* In case we were preempted while copying the
* message, we have to write the whole thing
* again.
*/
if (rsk->wrtoken != wrtoken) {
xnbufd_reset(bufd);
goto redo;
if (xret < 0) {
memset(rsk->bufmem + wroff + n - xret, 0, xret);
ret = -EFAULT;
break;
}
wroff = (wroff + n) % rsk->bufsz;
wbytes -= n;
wroff = (wroff + n) % rsk->bufsz;
} while (wbytes > 0);
rsk->fillsz += len;
rsk->wroff = wroff;
ret = len;
resched = 0;
rsk->wrrsvd -= len;
resched = 0;
if (rsk->fillsz == len) /* -> readable */
resched |= xnselect_signal(&rsk->priv->recv_block, POLLIN);
......
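Across the bufp hunks, the rdtoken/wrtoken scheme (re-run the whole copy whenever a concurrent reader or writer was detected) is replaced by read/write reservations: rdoff/wroff and rdrsvd/wrrsvd advance while the lock is held, the user copy runs with the lock dropped, and the reservation is committed afterwards, so a copy fault turns into -EFAULT instead of a retry. The single-threaded sketch below only mimics the reader-side offset arithmetic to make it concrete; memcpy stands in for xnbufd_copy_from_kmem, there is no locking, blocking or error path, and the ringbuf/ring_read names are made up for the example.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define BUFSZ 16

struct ringbuf {		/* stripped-down bufp_socket bookkeeping */
	char mem[BUFSZ];
	size_t fillsz;		/* bytes currently stored */
	size_t rdrsvd;		/* bytes reserved by in-flight readers */
	off_t rdoff;		/* next read offset */
};

static ssize_t ring_read(struct ringbuf *rb, void *dst, size_t len)
{
	size_t avail = rb->fillsz - rb->rdrsvd, rbytes, n;
	off_t rdoff;

	if (avail < len)
		return -1;	/* the driver would block here */

	/* Reserve a read slot (done under the lock in the driver). */
	rdoff = rb->rdoff;
	rb->rdoff = (rdoff + len) % BUFSZ;
	rb->rdrsvd += len;

	/* Copy circularly, possibly in two chunks (lock dropped here). */
	rbytes = len;
	do {
		n = (size_t)rdoff + rbytes > BUFSZ ? BUFSZ - rdoff : rbytes;
		memcpy((char *)dst + (len - rbytes), rb->mem + rdoff, n);
		rbytes -= n;
		rdoff = (rdoff + n) % BUFSZ;
	} while (rbytes > 0);

	/* Commit: drop the reservation, then shrink the fill count. */
	rb->rdrsvd -= len;
	rb->fillsz -= len;

	return (ssize_t)len;
}

int main(void)
{
	struct ringbuf rb = { .fillsz = 5 };
	char out[6] = { 0 };

	memcpy(rb.mem, "hello", 5);
	if (ring_read(&rb, out, 5) == 5)
		printf("read '%s', fillsz now %zu\n", out, rb.fillsz);

	return 0;
}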