Commit 6ed161f7 authored by Steven Seeger

fix issue with clobbered MSR bits

parent fed8485d
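Under I-pipe, hard_cond_local_irq_save() returns an MSR snapshot in flags, and hard_cond_local_irq_restore() / hard_preempt_enable() write that snapshot back to the MSR. Any facility bit (MSR_FP, MSR_VEC, MSR_VSX, MSR_SPE) changed in between by msr_check_and_set(), msr_check_and_clear() or a __giveup_*() helper is therefore clobbered on restore unless flags is updated to match; the hunks below mirror each such change into the local flags value (the final validate_sp() hunk is a separate CONFIG_IPIPE change). A minimal sketch of the failure mode, written as a stand-alone user-space model rather than kernel code, with hypothetical irq_save()/irq_restore() helpers standing in for the I-pipe primitives:

/*
 * Toy model of the clobbering problem: "msr" stands in for the
 * hardware MSR that the I-pipe save/restore helpers snapshot and
 * write back. The bit values match the powerpc MSR_EE/MSR_FP masks,
 * but the model itself is for illustration only.
 */
#include <assert.h>

#define MSR_EE  0x8000UL
#define MSR_FP  0x2000UL

static unsigned long msr = MSR_EE | MSR_FP;     /* live MSR */

static unsigned long irq_save(void)             /* ~ hard_cond_local_irq_save() */
{
        unsigned long flags = msr;              /* snapshot the whole MSR */
        msr &= ~MSR_EE;                         /* hard-disable interrupts */
        return flags;
}

static void irq_restore(unsigned long flags)    /* ~ hard_cond_local_irq_restore() */
{
        msr = flags;                            /* write the snapshot back */
}

int main(void)
{
        unsigned long flags = irq_save();

        msr &= ~MSR_FP;         /* what __giveup_fpu()/msr_check_and_clear() do */
        flags &= ~MSR_FP;       /* the fix: mirror the change into the snapshot */

        irq_restore(flags);
        assert(!(msr & MSR_FP));        /* stays clear only because flags was updated */
        return 0;
}

Drop the flags update and the assertion fires: irq_restore() writes the stale snapshot back and silently re-sets the bit the caller just gave up, which is exactly the clobbering the hunks below guard against.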
@@ -199,6 +199,7 @@ void giveup_fpu(struct task_struct *tsk)
msr_check_and_set(MSR_FP);
__giveup_fpu(tsk);
msr_check_and_clear(MSR_FP);
flags &= ~MSR_FP;
hard_cond_local_irq_restore(flags);
}
EXPORT_SYMBOL(giveup_fpu);
@@ -231,6 +232,11 @@ void flush_fp_to_thread(struct task_struct *tsk)
*/
BUG_ON(tsk != current);
giveup_fpu(tsk);
/* giveup_fpu clears the MSR_FP bit from MSR
* unconditionally
*/
flags &= ~MSR_FP;
}
hard_preempt_enable(flags);
}
@@ -245,6 +251,7 @@ void enable_kernel_fp(void)
flags = hard_cond_local_irq_save();
cpumsr = msr_check_and_set(MSR_FP);
flags |= MSR_FP; /* must exit this routine with MSR_FP bit set */
if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
check_if_tm_restore_required(current);
@@ -313,6 +320,7 @@ void enable_kernel_altivec(void)
flags = hard_cond_local_irq_save();
cpumsr = msr_check_and_set(MSR_VEC);
flags |= MSR_VEC; /* must exit this routine with MSR_VEC set in MSR */
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
check_if_tm_restore_required(current);
@@ -345,6 +353,10 @@ void flush_altivec_to_thread(struct task_struct *tsk)
if (tsk->thread.regs->msr & MSR_VEC) {
BUG_ON(tsk != current);
giveup_altivec(tsk);
/* giveup_altivec() clears MSR_VEC
* unconditionally from MSR
*/
flags &= ~MSR_VEC;
}
hard_preempt_enable(flags);
}
@@ -429,6 +441,10 @@ void flush_vsx_to_thread(struct task_struct *tsk)
if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
BUG_ON(tsk != current);
giveup_vsx(tsk);
/* giveup_vsx() clears MSR_FP,VEC,VSX unconditionally
* so clear them in flags
*/
flags &= ~(MSR_FP|MSR_VEC|MSR_VSX);
}
hard_preempt_enable(flags);
}
@@ -459,6 +475,8 @@ void giveup_spe(struct task_struct *tsk)
msr_check_and_set(MSR_SPE);
__giveup_spe(tsk);
msr_check_and_clear(MSR_SPE);
/* must exit this routine with MSR_SPE cleared in MSR */
flags &= ~MSR_SPE;
hard_cond_local_irq_restore(flags);
}
EXPORT_SYMBOL(giveup_spe);
@@ -471,7 +489,8 @@ void enable_kernel_spe(void)
flags = hard_cond_local_irq_save();
msr_check_and_set(MSR_SPE);
/* must exit this routine with MSR_SPE set in MSR */
flags |= MSR_SPE;
if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
check_if_tm_restore_required(current);
__giveup_spe(current);
@@ -489,6 +508,10 @@ void flush_spe_to_thread(struct task_struct *tsk)
BUG_ON(tsk != current);
tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
giveup_spe(tsk);
/* giveup_spe clears MSR_SPE from MSR, so must clear
 * it here to exit routine properly
 */
flags &= ~MSR_SPE;
}
hard_preempt_enable(flags);
}
@@ -551,6 +574,7 @@ void giveup_all(struct task_struct *tsk)
#endif
msr_check_and_clear(msr_all_available);
flags &= ~msr_all_available; /* clear all available just in case */
hard_cond_local_irq_restore(flags);
}
EXPORT_SYMBOL(giveup_all);
@@ -583,6 +607,7 @@ void restore_math(struct pt_regs *regs)
}
msr_check_and_clear(msr_all_available);
flags &= ~msr_all_available; /* clear all available just in case */
hard_cond_local_irq_restore(flags);
regs->msr = msr;
@@ -1248,6 +1273,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
/* Save FPU, Altivec, VSX and SPE state */
giveup_all(prev);
/* giveup_all clears msr_all_available bits unconditionally */
flags &= ~msr_all_available;
__switch_to_tm(prev, new);
@@ -2025,6 +2052,15 @@ int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
#ifdef CONFIG_IPIPE
int validate_sp(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
return 0;
}
#else /* !CONFIG_IPIPE */
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
@@ -2061,6 +2097,8 @@ int validate_sp(unsigned long sp, struct task_struct *p,
return valid_irq_stack(sp, p, nbytes);
}
#endif /* CONFIG_IPIPE */
EXPORT_SYMBOL(validate_sp);
unsigned long get_wchan(struct task_struct *p)