Commit a3f1a900 authored by Steven Seeger

fix issue with clobbered MSR bits

parent 637effa1
@@ -175,6 +175,7 @@ void giveup_fpu(struct task_struct *tsk)
 	msr_check_and_set(MSR_FP);
 	__giveup_fpu(tsk);
 	msr_check_and_clear(MSR_FP);
+	flags &= ~MSR_FP;
 	hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL(giveup_fpu);
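
This is the pattern repeated throughout the commit: under I-pipe, hard_cond_local_irq_save() snapshots the whole MSR and hard_cond_local_irq_restore() writes that snapshot back verbatim, so any facility bit the routine sets or clears in the live MSR must be mirrored into flags, or the restore resurrects the stale value (the "clobbered MSR bits" of the subject line). A minimal user-space model of the clear case; the helpers are stand-ins that only mimic the kernel names, and the bit values are illustrative:

```c
/* User-space model of the save/modify/restore pattern this commit
 * fixes. The "MSR" is emulated as a global; these helpers mimic the
 * kernel names but are stand-ins, not the real implementations.
 */
#include <assert.h>

#define MSR_EE 0x8000UL	/* illustrative bit values */
#define MSR_FP 0x2000UL

static unsigned long msr = MSR_EE | MSR_FP;

static unsigned long hard_cond_local_irq_save(void)
{
	unsigned long flags = msr;	/* snapshot the whole MSR */
	msr &= ~MSR_EE;			/* hard-disable interrupts */
	return flags;
}

static void hard_cond_local_irq_restore(unsigned long flags)
{
	msr = flags;			/* writes the snapshot back verbatim */
}

static void msr_check_and_clear(unsigned long bits)
{
	msr &= ~bits;
}

int main(void)
{
	unsigned long flags = hard_cond_local_irq_save();

	msr_check_and_clear(MSR_FP);	/* the routine's real work */
	flags &= ~MSR_FP;		/* the fix: keep the snapshot in sync */
	hard_cond_local_irq_restore(flags);

	/* without the "flags &= ~MSR_FP" line, MSR_FP would be back on */
	assert(!(msr & MSR_FP));
	return 0;
}
```

The enable_kernel_*() hunks below are the dual case: msr_check_and_set() turns the bit on in the live MSR, so the snapshot needs the corresponding flags |= before the routine returns.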
@@ -207,6 +208,11 @@ void flush_fp_to_thread(struct task_struct *tsk)
 		 */
 		BUG_ON(tsk != current);
 		giveup_fpu(tsk);
+		/* giveup_fpu clears the MSR_FP bit from MSR
+		 * unconditionally
+		 */
+		flags &= ~MSR_FP;
 	}
 	hard_preempt_enable(flags);
 }
@@ -221,6 +227,7 @@ void enable_kernel_fp(void)
 	flags = hard_cond_local_irq_save();
 	cpumsr = msr_check_and_set(MSR_FP);
+	flags |= MSR_FP; /* must exit this routine with MSR_FP bit set */
 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
 		check_if_tm_restore_required(current);
@@ -289,6 +296,7 @@ void enable_kernel_altivec(void)
 	flags = hard_cond_local_irq_save();
 	cpumsr = msr_check_and_set(MSR_VEC);
+	flags |= MSR_VEC; /* must exit this routine with MSR_VEC set in MSR */
 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
 		check_if_tm_restore_required(current);
@@ -321,6 +329,10 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 	if (tsk->thread.regs->msr & MSR_VEC) {
 		BUG_ON(tsk != current);
 		giveup_altivec(tsk);
+		/* giveup_altivec() clears MSR_VEC
+		 * unconditionally from MSR
+		 */
+		flags &= ~MSR_VEC;
 	}
 	hard_preempt_enable(flags);
 }
@@ -405,6 +417,10 @@ void flush_vsx_to_thread(struct task_struct *tsk)
 	if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
 		BUG_ON(tsk != current);
 		giveup_vsx(tsk);
+		/* giveup_vsx() clears MSR_FP,VEC,VSX unconditionally
+		 * so clear them in flags
+		 */
+		flags &= ~(MSR_FP|MSR_VEC|MSR_VSX);
 	}
 	hard_preempt_enable(flags);
 }
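
flush_vsx_to_thread() has to mask three bits at once because VSX overlays the FP and VMX register files, so giveup_vsx() gives up all three facilities together. A quick check of the mask arithmetic, using the bit values from arch/powerpc/include/asm/reg.h (shown here only for illustration):

```c
#include <assert.h>

#define MSR_FP	0x00002000UL	/* values from asm/reg.h, for illustration */
#define MSR_VSX	0x00800000UL
#define MSR_VEC	0x02000000UL

int main(void)
{
	unsigned long flags = MSR_FP | MSR_VEC | MSR_VSX | 0x8000UL /* EE */;

	/* clearing only MSR_VSX would leave the hunk's entry test true */
	assert((flags & ~MSR_VSX) & (MSR_VSX | MSR_VEC | MSR_FP));

	/* the combined clear matches what giveup_vsx() does to the MSR */
	flags &= ~(MSR_FP | MSR_VEC | MSR_VSX);
	assert(!(flags & (MSR_VSX | MSR_VEC | MSR_FP)));
	return 0;
}
```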
@@ -435,6 +451,8 @@ void giveup_spe(struct task_struct *tsk)
 	msr_check_and_set(MSR_SPE);
 	__giveup_spe(tsk);
 	msr_check_and_clear(MSR_SPE);
+	/* must exit this routine with MSR_SPE cleared in MSR */
+	flags &= ~MSR_SPE;
 	hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL(giveup_spe);
@@ -447,7 +465,8 @@ void enable_kernel_spe(void)
 	flags = hard_cond_local_irq_save();
 	msr_check_and_set(MSR_SPE);
+	/* must exit this routine with MSR_SPE set in MSR */
+	flags |= MSR_SPE;
 	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
 		check_if_tm_restore_required(current);
 		__giveup_spe(current);
@@ -465,6 +484,10 @@ void flush_spe_to_thread(struct task_struct *tsk)
 		BUG_ON(tsk != current);
 		tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
 		giveup_spe(tsk);
+		/* giveup_spe clears MSR_SPE from MSR, so it must be
+		 * cleared here as well to exit this routine properly
+		 */
+		flags &= ~MSR_SPE;
 	}
 	hard_preempt_enable(flags);
 }
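
A side note on why the SPE hunks can mirror the AltiVec ones so closely: MSR bit 25 is MSR_VEC on Book3S parts and MSR_SPE on e500 cores, and since the two facilities never appear on the same CPU the architecture reuses the bit. An illustrative check against the asm/reg.h values:

```c
#include <assert.h>

#define MSR_VEC 0x02000000UL	/* __MASK(MSR_VEC_LG), MSR_VEC_LG == 25 */
#define MSR_SPE 0x02000000UL	/* __MASK(MSR_SPE_LG), MSR_SPE_LG == 25 */

int main(void)
{
	assert(MSR_VEC == MSR_SPE);	/* same bit, different cores */
	return 0;
}
```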
@@ -527,6 +550,7 @@ void giveup_all(struct task_struct *tsk)
 #endif
 	msr_check_and_clear(msr_all_available);
+	flags &= ~msr_all_available; /* clear all available just in case */
 	hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL(giveup_all);
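
For context, msr_all_available is the set of facility bits this kernel can hand out, assembled once at boot. Mainline process.c builds it roughly like this (abridged; the exact feature tests may differ by kernel version):

```c
static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
#ifdef CONFIG_PPC_FPU
	msr_all_available |= MSR_FP;
#endif
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_all_available |= MSR_VEC;
#endif
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_all_available |= MSR_VSX;
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE))
		msr_all_available |= MSR_SPE;
#endif
	return 0;
}
early_initcall(init_msr_all_available);
```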
@@ -559,6 +583,7 @@ void restore_math(struct pt_regs *regs)
 	}
 	msr_check_and_clear(msr_all_available);
+	flags &= ~msr_all_available; /* clear all available just in case */
 	hard_cond_local_irq_restore(flags);
 	regs->msr = msr;
@@ -1224,6 +1249,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	/* Save FPU, Altivec, VSX and SPE state */
 	giveup_all(prev);
+	/* giveup_all clears msr_all_available bits unconditionally */
+	flags &= ~msr_all_available;
 	__switch_to_tm(prev, new);
@@ -1888,6 +1915,15 @@ int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
 	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
 }
+#ifdef CONFIG_IPIPE
+int validate_sp(unsigned long sp, struct task_struct *p,
+		unsigned long nbytes)
+{
+	return 0;
+}
+#else /* !CONFIG_IPIPE */
 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
 				  unsigned long nbytes)
 {
@@ -1924,6 +1960,8 @@ int validate_sp(unsigned long sp, struct task_struct *p,
 	return valid_irq_stack(sp, p, nbytes);
 }
+#endif /* CONFIG_IPIPE */
 EXPORT_SYMBOL(validate_sp);

 unsigned long get_wchan(struct task_struct *p)
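
With CONFIG_IPIPE the stub above makes validate_sp() report every frame as invalid (its contract is 1 for a valid stack pointer, 0 otherwise), so stack walkers give up immediately rather than chase frames that may belong to an out-of-band domain; that reading is an inference from the stub, not something the commit states. For reference, this is how a caller such as get_wchan() in the same file consumes the result (condensed from mainline; details trimmed):

```c
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;	/* a 0 from validate_sp() stops the walk */

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
		    p->state == TASK_RUNNING)
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
```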