Commit eda0c6d6 authored by Martin Schwidefsky

s390: fix race on TIF_MCCK_PENDING

There is a small race window in the __switch_to code in regard to
the transfer of the TIF_MCCK_PENDING bit from the previous to the
next task. The bit is transferred before the task struct pointer
and the thread-info pointer for the next task have been stored to
lowcore. If a machine check sets the TIF_MCCK_PENDING bit between
the transfer code and the store of current/thread_info, the bit is
still set for the previous task, and if the previous task has
terminated, it can get lost. The effect is that a pending CRW is
not retrieved until the next machine check sets TIF_MCCK_PENDING.
To fix this, reorder __switch_to to first store the task struct
and thread-info pointers and then do the transfer of the bit.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent e5b8d755
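
Before the diff itself, a minimal C sketch of the race may help readers who do
not work in s390 assembly. This is illustrative only, not kernel code:
lowcore_current stands in for the __LC_CURRENT lowcore slot that the machine
check handler consults, the atomic flag word stands in for thread_info->flags,
and the TIF_MCCK_PENDING bit value is a placeholder.

#include <stdatomic.h>

#define TIF_MCCK_PENDING (1u << 7)              /* placeholder bit value */

struct thread_info { atomic_uint flags; };
struct task { struct thread_info ti; };

/* Stand-in for the lowcore __LC_CURRENT slot the machine check
 * handler reads to find the current task. */
static struct task *_Atomic lowcore_current;

/* Old, racy order: transfer the bit first, publish "next" afterwards.
 * A machine check arriving between the two steps still sees "prev" in
 * lowcore and sets TIF_MCCK_PENDING on a task that may be terminating,
 * so the bit (and the pending CRW retrieval) can be lost. */
static void switch_to_racy(struct task *prev, struct task *next)
{
	unsigned int old = atomic_fetch_and(&prev->ti.flags,
					    ~TIF_MCCK_PENDING);
	if (old & TIF_MCCK_PENDING)             /* step 1: move the bit */
		atomic_fetch_or(&next->ti.flags, TIF_MCCK_PENDING);
	atomic_store(&lowcore_current, next);   /* step 2: publish next */
}

/* Fixed order, as in this patch: publish "next" first, then transfer.
 * A machine check now hits either before the store (the bit lands on
 * prev and is transferred below) or after it (the bit lands directly
 * on next); either way it cannot be lost. */
static void switch_to_fixed(struct task *prev, struct task *next)
{
	atomic_store(&lowcore_current, next);   /* publish next first */
	unsigned int old = atomic_fetch_and(&prev->ti.flags,
					    ~TIF_MCCK_PENDING);
	if (old & TIF_MCCK_PENDING)
		atomic_fetch_or(&next->ti.flags, TIF_MCCK_PENDING);
}

The same inversion is what the hunks below perform: the st/stg of __LC_CURRENT
and __LC_THREAD_INFO moves ahead of the tm/ni/oi sequence that transfers the
bit.
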
@@ -145,22 +145,23 @@ STACK_SIZE = 1 << STACK_SHIFT
  * gpr2 = prev
  */
 ENTRY(__switch_to)
+	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
+	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
 	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
 	l	%r5,__THREAD_info(%r3)		# get thread_info of next
+	lr	%r15,%r5
+	ahi	%r15,STACK_SIZE			# end of kernel stack of next
+	st	%r3,__LC_CURRENT		# store task struct of next
+	st	%r5,__LC_THREAD_INFO		# store thread info of next
+	st	%r15,__LC_KERNEL_STACK		# store end of kernel stack
+	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
+	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
+	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
 	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
 	jz	0f
 	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
 	oi	__TI_flags+3(%r5),_TIF_MCCK_PENDING	# set it in next
-0:	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
-	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
-	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
-	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
-	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
-	st	%r3,__LC_CURRENT		# store task struct of next
-	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
-	st	%r5,__LC_THREAD_INFO		# store thread info of next
-	ahi	%r5,STACK_SIZE			# end of kernel stack of next
-	st	%r5,__LC_KERNEL_STACK		# store end of kernel stack
+0:	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	br	%r14
 __critical_start:
@@ -164,22 +164,23 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
  * gpr2 = prev
  */
 ENTRY(__switch_to)
+	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
+	stg	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
 	lg	%r4,__THREAD_info(%r2)		# get thread_info of prev
 	lg	%r5,__THREAD_info(%r3)		# get thread_info of next
+	lgr	%r15,%r5
+	aghi	%r15,STACK_SIZE			# end of kernel stack of next
+	stg	%r3,__LC_CURRENT		# store task struct of next
+	stg	%r5,__LC_THREAD_INFO		# store thread info of next
+	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
+	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
+	mvc	__LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
+	lg	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
 	tm	__TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
 	jz	0f
 	ni	__TI_flags+7(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
 	oi	__TI_flags+7(%r5),_TIF_MCCK_PENDING	# set it in next
-0:	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
-	stg	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
-	lg	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
-	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
-	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
-	stg	%r3,__LC_CURRENT		# store task struct of next
-	mvc	__LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
-	stg	%r5,__LC_THREAD_INFO		# store thread info of next
-	aghi	%r5,STACK_SIZE			# end of kernel stack of next
-	stg	%r5,__LC_KERNEL_STACK		# store end of kernel stack
+0:	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	br	%r14
 __critical_start: