Commit 690551a1 authored by Philippe Gerum

lib, kernel: ipipe: hard protect against preemption by head domain
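
With the interrupt pipeline in, local_irq_save()/raw_local_irq_save() and
preempt_disable() only act on the root (Linux) stage: the CPU keeps taking
interrupts, so a handler running over the head domain can still preempt the
section they are meant to protect. Paths that must also be consistent with
respect to the head domain (generic atomics and cmpxchg emulation, atomic64,
kgdb, module refcounting, hibernation, the preemption debug check) are
therefore switched to the hard_* masking helpers, or bail out early when the
current domain is not the root one (!ipipe_root_p). A minimal sketch of the
substitution pattern follows; counter_add_*() is illustrative only and not
part of this patch, while the hard_* helpers are the I-pipe ones used below:

	/* Sketch only; assumes <linux/atomic.h> and <linux/ipipe.h>. */

	/* Before: masking is virtual and root-domain only; a head-domain
	 * interrupt may still preempt the update. */
	static void counter_add_virtual(int i, atomic_t *v)
	{
		unsigned long flags;

		raw_local_irq_save(flags);
		v->counter += i;
		raw_local_irq_restore(flags);
	}

	/* After: interrupts are masked in the CPU, so no domain can
	 * preempt the update. Note the calling convention: the hard
	 * variant returns the flags to restore. */
	static void counter_add_hard(int i, atomic_t *v)
	{
		unsigned long flags;

		flags = hard_local_irq_save();
		v->counter += i;
		hard_local_irq_restore(flags);
	}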

parent fd0dca86
@@ -82,9 +82,9 @@ static inline void atomic_##op(int i, atomic_t *v)	\
 {	\
 	unsigned long flags;	\
 	\
-	raw_local_irq_save(flags);	\
+	flags = hard_local_irq_save();	\
 	v->counter = v->counter c_op i;	\
-	raw_local_irq_restore(flags);	\
+	hard_local_irq_restore(flags);	\
 }
 
 #define ATOMIC_OP_RETURN(op, c_op)	\
@@ -93,9 +93,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	unsigned long flags;	\
 	int ret;	\
 	\
-	raw_local_irq_save(flags);	\
+	flags = hard_local_irq_save();	\
 	ret = (v->counter = v->counter c_op i);	\
-	raw_local_irq_restore(flags);	\
+	hard_local_irq_restore(flags);	\
 	\
 	return ret;	\
 }
...
@@ -22,20 +22,20 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
 	arch_spinlock_t *s = ATOMIC_HASH(l);	\
-	local_irq_save(f);	\
+	(f) = hard_local_irq_save();	\
 	arch_spin_lock(s);	\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
 	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	arch_spin_unlock(s);	\
-	local_irq_restore(f);	\
+	hard_local_irq_restore(f);	\
 } while(0)
 
 #else
-# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
-# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
+# define _atomic_spin_lock_irqsave(l,f) do { (f) = hard_local_irq_save(); } while (0)
+# define _atomic_spin_unlock_irqrestore(l,f) do { hard_local_irq_restore(f); } while (0)
 #endif
 
 /*
...
@@ -23,7 +23,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
 	if (size == 8 && sizeof(unsigned long) != 8)
 		wrong_size_cmpxchg(ptr);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	switch (size) {
 	case 1: prev = *(u8 *)ptr;
 		if (prev == old)
@@ -44,7 +44,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
 	default:
 		wrong_size_cmpxchg(ptr);
 	}
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 	return prev;
 }
@@ -57,11 +57,11 @@ static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
 	u64 prev;
 	unsigned long flags;
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	prev = *(u64 *)ptr;
 	if (prev == old)
 		*(u64 *)ptr = new;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 	return prev;
 }
...
@@ -113,7 +113,7 @@ void context_tracking_enter(enum ctx_state state)
 	 * helpers are enough to protect RCU uses inside the exception. So
 	 * just return immediately if we detect we are in an IRQ.
 	 */
-	if (in_interrupt())
+	if (!ipipe_root_p || in_interrupt())
 		return;
 
 	local_irq_save(flags);
@@ -169,7 +169,7 @@ void context_tracking_exit(enum ctx_state state)
 {
 	unsigned long flags;
 
-	if (in_interrupt())
+	if (!ipipe_root_p || in_interrupt())
 		return;
 
 	local_irq_save(flags);
...
@@ -119,8 +119,8 @@ static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
  */
 atomic_t kgdb_active = ATOMIC_INIT(-1);
 EXPORT_SYMBOL_GPL(kgdb_active);
-static DEFINE_RAW_SPINLOCK(dbg_master_lock);
-static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
+static IPIPE_DEFINE_RAW_SPINLOCK(dbg_master_lock);
+static IPIPE_DEFINE_RAW_SPINLOCK(dbg_slave_lock);
 
 /*
  * We use NR_CPUs not PERCPU, in case kgdb is used to debug early
@@ -461,7 +461,9 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
 static void dbg_touch_watchdogs(void)
 {
 	touch_softlockup_watchdog_sync();
+#ifndef CONFIG_IPIPE
 	clocksource_touch_watchdog();
+#endif
 	rcu_cpu_stall_reset();
 }
 
@@ -492,7 +494,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 	 * Interrupts will be restored by the 'trap return' code, except when
 	 * single stepping.
 	 */
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 
 	cpu = ks->cpu;
 	kgdb_info[cpu].debuggerinfo = regs;
@@ -541,7 +543,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 			smp_mb__before_atomic();
 			atomic_dec(&slaves_in_kgdb);
 			dbg_touch_watchdogs();
-			local_irq_restore(flags);
+			hard_local_irq_restore(flags);
 			return 0;
 		}
 		cpu_relax();
@@ -559,7 +561,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 		atomic_set(&kgdb_active, -1);
 		raw_spin_unlock(&dbg_master_lock);
 		dbg_touch_watchdogs();
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 
 		goto acquirelock;
 	}
@@ -676,7 +678,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 	atomic_set(&kgdb_active, -1);
 	raw_spin_unlock(&dbg_master_lock);
 	dbg_touch_watchdogs();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return kgdb_info[cpu].ret_state;
 }
@@ -795,9 +797,9 @@ static void kgdb_console_write(struct console *co, const char *s,
 	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
 		return;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	gdbstub_msg_write(s, count);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static struct console kgdbcons = {
...
@@ -1109,7 +1109,7 @@ bool try_module_get(struct module *module)
 	bool ret = true;
 
 	if (module) {
-		preempt_disable();
+		unsigned long flags = hard_preempt_disable();
 		/* Note: here, we can fail to get a reference */
 		if (likely(module_is_live(module) &&
 			   atomic_inc_not_zero(&module->refcnt) != 0))
@@ -1117,7 +1117,7 @@ bool try_module_get(struct module *module)
 		else
 			ret = false;
 
-		preempt_enable();
+		hard_preempt_enable(flags);
 	}
 	return ret;
 }
@@ -1128,11 +1128,11 @@ void module_put(struct module *module)
 	int ret;
 
 	if (module) {
-		preempt_disable();
+		unsigned long flags = hard_preempt_disable();
 		ret = atomic_dec_if_positive(&module->refcnt);
 		WARN_ON(ret < 0);	/* Failed to put refcount */
 		trace_module_put(module, _RET_IP_);
-		preempt_enable();
+		hard_preempt_enable(flags);
 	}
 }
 EXPORT_SYMBOL(module_put);
...
@@ -286,6 +286,7 @@ static int create_image(int platform_mode)
 		goto Enable_cpus;
 
 	local_irq_disable();
+	hard_cond_local_irq_disable();
 
 	error = syscore_suspend();
 	if (error) {
@@ -445,6 +446,7 @@ static int resume_target_kernel(bool platform_mode)
 		goto Enable_cpus;
 
 	local_irq_disable();
+	hard_cond_local_irq_disable();
 
 	error = syscore_suspend();
 	if (error)
@@ -563,6 +565,7 @@ int hibernation_platform_enter(void)
 		goto Enable_cpus;
 
 	local_irq_disable();
+	hard_cond_local_irq_disable();
 	syscore_suspend();
 	if (pm_wakeup_pending()) {
 		error = -EAGAIN;
...
@@ -29,15 +29,15 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-	raw_spinlock_t lock;
+	ipipe_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
 	[0 ... (NR_LOCKS - 1)] = {
-		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+		.lock = IPIPE_SPIN_LOCK_UNLOCKED,
 	},
 };
 
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline ipipe_spinlock_t *lock_addr(const atomic64_t *v)
 {
 	unsigned long addr = (unsigned long) v;
@@ -49,7 +49,7 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 long long atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	ipipe_spinlock_t *lock = lock_addr(v);
 	long long val;
 
 	raw_spin_lock_irqsave(lock, flags);
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(atomic64_read);
 void atomic64_set(atomic64_t *v, long long i)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	ipipe_spinlock_t *lock = lock_addr(v);
 
 	raw_spin_lock_irqsave(lock, flags);
 	v->counter = i;
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(atomic64_set);
 void atomic64_##op(long long a, atomic64_t *v)	\
 {	\
 	unsigned long flags;	\
-	raw_spinlock_t *lock = lock_addr(v);	\
+	ipipe_spinlock_t *lock = lock_addr(v);	\
 	\
 	raw_spin_lock_irqsave(lock, flags);	\
 	v->counter c_op a;	\
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atomic64_##op);
 long long atomic64_##op##_return(long long a, atomic64_t *v)	\
 {	\
 	unsigned long flags;	\
-	raw_spinlock_t *lock = lock_addr(v);	\
+	ipipe_spinlock_t *lock = lock_addr(v);	\
 	long long val;	\
 	\
 	raw_spin_lock_irqsave(lock, flags);	\
@@ -100,7 +100,7 @@ EXPORT_SYMBOL(atomic64_##op##_return);
 long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
 {	\
 	unsigned long flags;	\
-	raw_spinlock_t *lock = lock_addr(v);	\
+	ipipe_spinlock_t *lock = lock_addr(v);	\
 	long long val;	\
 	\
 	raw_spin_lock_irqsave(lock, flags);	\
@@ -137,7 +137,7 @@ ATOMIC64_OPS(xor, ^=)
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	ipipe_spinlock_t *lock = lock_addr(v);
 	long long val;
 
 	raw_spin_lock_irqsave(lock, flags);
@@ -152,7 +152,7 @@ EXPORT_SYMBOL(atomic64_dec_if_positive);
 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	ipipe_spinlock_t *lock = lock_addr(v);
 	long long val;
 
 	raw_spin_lock_irqsave(lock, flags);
@@ -167,7 +167,7 @@ EXPORT_SYMBOL(atomic64_cmpxchg);
 long long atomic64_xchg(atomic64_t *v, long long new)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	ipipe_spinlock_t *lock = lock_addr(v);
 	long long val;
 
 	raw_spin_lock_irqsave(lock, flags);
@@ -181,7 +181,7 @@ EXPORT_SYMBOL(atomic64_xchg);
 int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	ipipe_spinlock_t *lock = lock_addr(v);
 	int ret = 0;
 
 	raw_spin_lock_irqsave(lock, flags);
...
@@ -7,12 +7,19 @@
 #include <linux/export.h>
 #include <linux/kallsyms.h>
 #include <linux/sched.h>
+#include <linux/ipipe.h>
 
 notrace static unsigned int check_preemption_disabled(const char *what1,
 						       const char *what2)
 {
 	int this_cpu = raw_smp_processor_id();
 
+	if (hard_irqs_disabled())
+		goto out;
+
+	if (!ipipe_root_p)
+		goto out;
+
 	if (likely(preempt_count()))
 		goto out;
...