Commit 97f2645f authored by Masahiro Yamada, committed by Linus Torvalds

tree-wide: replace config_enabled() with IS_ENABLED()

The use of config_enabled() against config options is ambiguous.  In
practical terms, config_enabled() is equivalent to IS_BUILTIN(), but the
author may well have intended the semantics of IS_ENABLED().  Using
IS_ENABLED(), IS_BUILTIN(), IS_MODULE(), etc. makes the intention
clearer.

This commit replaces config_enabled() with IS_ENABLED() where possible.
It touches only bool config options.

I noticed two cases where config_enabled() is used against a tristate
option:

 - config_enabled(CONFIG_HWMON)
  [ drivers/net/wireless/ath/ath10k/thermal.c ]

 - config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)
  [ drivers/gpu/drm/gma500/opregion.c ]

I did not touch them: preserving the current logic would require
converting them to IS_BUILTIN(), but I was not sure that was the
authors' intention.
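
For reference, keeping today's behaviour for such a tristate option
means using IS_BUILTIN() rather than IS_ENABLED().  A minimal sketch
(not part of this patch; the two helper functions are invented for
illustration, using CONFIG_HWMON from the ath10k case above):

  #include <linux/kconfig.h>
  #include <linux/types.h>

  /* Same result as config_enabled(CONFIG_HWMON): true only for =y. */
  static bool hwmon_is_builtin(void)
  {
          return IS_BUILTIN(CONFIG_HWMON);
  }

  /* Also true for CONFIG_HWMON=m, so this would change the logic. */
  static bool hwmon_is_reachable(void)
  {
          return IS_ENABLED(CONFIG_HWMON);
  }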

Link: http://lkml.kernel.org/r/1465215656-20569-1-git-send-email-yamada.masahiro@socionext.com
Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Acked-by: Kees Cook <keescook@chromium.org>
Cc: Stas Sergeev <stsp@list.ru>
Cc: Matt Redfearn <matt.redfearn@imgtec.com>
Cc: Joshua Kinard <kumba@gentoo.org>
Cc: Jiri Slaby <jslaby@suse.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Markos Chandras <markos.chandras@imgtec.com>
Cc: "Dmitry V. Levin" <ldv@altlinux.org>
Cc: yu-cheng yu <yu-cheng.yu@intel.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Will Drewry <wad@chromium.org>
Cc: Nikolay Martynov <mar.kolya@gmail.com>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
Cc: Rafal Milecki <zajec5@gmail.com>
Cc: James Cowgill <James.Cowgill@imgtec.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Alex Smith <alex.smith@imgtec.com>
Cc: Adam Buchbinder <adam.buchbinder@gmail.com>
Cc: Qais Yousef <qais.yousef@imgtec.com>
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Mikko Rapeli <mikko.rapeli@iki.fi>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Brian Norris <computersforpeace@gmail.com>
Cc: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
Cc: "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Roland McGrath <roland@hack.frob.com>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Kalle Valo <kvalo@qca.qualcomm.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Tony Wu <tung7970@gmail.com>
Cc: Huaitong Han <huaitong.han@intel.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrea Gelmini <andrea.gelmini@gelma.net>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Rabin Vincent <rabin@rab.in>
Cc: "Maciej W. Rozycki" <macro@imgtec.com>
Cc: David Daney <david.daney@cavium.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1c8cb409
@@ -462,7 +462,7 @@ static inline unsigned int mips_cm_max_vp_width(void)
if (mips_cm_revision() >= CM_REV_CM3)
return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
if (config_enabled(CONFIG_SMP))
if (IS_ENABLED(CONFIG_SMP))
return smp_num_siblings;
return 1;
@@ -159,7 +159,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
* it better already be global)
*/
if (pte_none(*buddy)) {
if (!config_enabled(CONFIG_XPA))
if (!IS_ENABLED(CONFIG_XPA))
buddy->pte_low |= _PAGE_GLOBAL;
buddy->pte_high |= _PAGE_GLOBAL;
}
@@ -172,7 +172,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
htw_stop();
/* Preserve global status for the pair */
if (config_enabled(CONFIG_XPA)) {
if (IS_ENABLED(CONFIG_XPA)) {
if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
null.pte_high = _PAGE_GLOBAL;
} else {
@@ -319,7 +319,7 @@ static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
static inline pte_t pte_wrprotect(pte_t pte)
{
pte.pte_low &= ~_PAGE_WRITE;
if (!config_enabled(CONFIG_XPA))
if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low &= ~_PAGE_SILENT_WRITE;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
@@ -328,7 +328,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
static inline pte_t pte_mkclean(pte_t pte)
{
pte.pte_low &= ~_PAGE_MODIFIED;
if (!config_enabled(CONFIG_XPA))
if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low &= ~_PAGE_SILENT_WRITE;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
@@ -337,7 +337,7 @@ static inline pte_t pte_mkclean(pte_t pte)
static inline pte_t pte_mkold(pte_t pte)
{
pte.pte_low &= ~_PAGE_ACCESSED;
if (!config_enabled(CONFIG_XPA))
if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low &= ~_PAGE_SILENT_READ;
pte.pte_high &= ~_PAGE_SILENT_READ;
return pte;
@@ -347,7 +347,7 @@ static inline pte_t pte_mkwrite(pte_t pte)
{
pte.pte_low |= _PAGE_WRITE;
if (pte.pte_low & _PAGE_MODIFIED) {
if (!config_enabled(CONFIG_XPA))
if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low |= _PAGE_SILENT_WRITE;
pte.pte_high |= _PAGE_SILENT_WRITE;
}
@@ -358,7 +358,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
{
pte.pte_low |= _PAGE_MODIFIED;
if (pte.pte_low & _PAGE_WRITE) {
if (!config_enabled(CONFIG_XPA))
if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low |= _PAGE_SILENT_WRITE;
pte.pte_high |= _PAGE_SILENT_WRITE;
}
@@ -369,7 +369,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
{
pte.pte_low |= _PAGE_ACCESSED;
if (!(pte.pte_low & _PAGE_NO_READ)) {
if (!config_enabled(CONFIG_XPA))
if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low |= _PAGE_SILENT_READ;
pte.pte_high |= _PAGE_SILENT_READ;
}
@@ -16,10 +16,10 @@ static inline const int *get_compat_mode1_syscalls(void)
0, /* null terminated */
};
if (config_enabled(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS))
if (IS_ENABLED(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS))
return syscalls_O32;
if (config_enabled(CONFIG_MIPS32_N32))
if (IS_ENABLED(CONFIG_MIPS32_N32))
return syscalls_N32;
BUG();
@@ -19,8 +19,8 @@ extern struct mips_abi mips_abi_32;
((ka)->sa.sa_flags & SA_SIGINFO))
#else
#define sig_uses_siginfo(ka, abi) \
(config_enabled(CONFIG_64BIT) ? 1 : \
(config_enabled(CONFIG_TRAD_SIGNALS) ? \
(IS_ENABLED(CONFIG_64BIT) ? 1 : \
(IS_ENABLED(CONFIG_TRAD_SIGNALS) ? \
((ka)->sa.sa_flags & SA_SIGINFO) : 1) )
#endif
@@ -99,7 +99,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
{
int ret;
/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
if ((config_enabled(CONFIG_32BIT) ||
if ((IS_ENABLED(CONFIG_32BIT) ||
test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
(regs->regs[2] == __NR_syscall))
i++;
@@ -88,7 +88,7 @@ extern u64 __ua_limit;
*/
static inline bool eva_kernel_access(void)
{
if (!config_enabled(CONFIG_EVA))
if (!IS_ENABLED(CONFIG_EVA))
return false;
return segment_eq(get_fs(), get_ds());
@@ -75,7 +75,7 @@ void __init device_tree_init(void)
const char *get_system_type(void)
{
if (config_enabled(CONFIG_MACH_JZ4780))
if (IS_ENABLED(CONFIG_MACH_JZ4780))
return "JZ4780";
return "JZ4740";
@@ -244,7 +244,7 @@ static inline void check_daddi(void)
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1;
int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 0 : -1;
static inline void check_daddiu(void)
{
@@ -314,7 +314,7 @@ static inline void check_daddiu(void)
void __init check_bugs64_early(void)
{
if (!config_enabled(CONFIG_CPU_MIPSR6)) {
if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) {
check_mult_sh();
check_daddiu();
}
@@ -322,6 +322,6 @@ void __init check_bugs64_early(void)
void __init check_bugs64(void)
{
if (!config_enabled(CONFIG_CPU_MIPSR6))
if (!IS_ENABLED(CONFIG_CPU_MIPSR6))
check_daddi();
}
@@ -179,7 +179,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
return -ELIBBAD;
}
if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
fp_abi = state->fp_abi;
@@ -285,7 +285,7 @@ void mips_set_personality_fp(struct arch_elf_state *state)
* not be worried about N32/N64 binaries.
*/
if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return;
switch (state->overall_fp_mode) {
@@ -251,7 +251,7 @@ int mips_cm_probe(void)
mips_cm_probe_l2sync();
/* determine register width for this CM */
mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
mips_cm_is64 = IS_ENABLED(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
for_each_possible_cpu(cpu)
spin_lock_init(&per_cpu(cm_core_lock, cpu));
@@ -84,7 +84,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
(s32)MIPSInst_SIMM(ir);
return 0;
case daddiu_op:
if (config_enabled(CONFIG_32BIT))
if (IS_ENABLED(CONFIG_32BIT))
break;
if (MIPSInst_RT(ir))
@@ -143,7 +143,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
(u32)regs->regs[MIPSInst_RT(ir)]);
return 0;
case dsll_op:
if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
@@ -152,7 +152,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
MIPSInst_FD(ir));
return 0;
case dsrl_op:
if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
@@ -161,7 +161,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
MIPSInst_FD(ir));
return 0;
case daddu_op:
if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
@@ -170,7 +170,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
(u64)regs->regs[MIPSInst_RT(ir)];
return 0;
case dsubu_op:
if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
@@ -498,7 +498,7 @@ static int dmult_func(struct pt_regs *regs, u32 ir)
s64 res;
s64 rt, rs;
if (config_enabled(CONFIG_32BIT))
if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -530,7 +530,7 @@ static int dmultu_func(struct pt_regs *regs, u32 ir)
u64 res;
u64 rt, rs;
if (config_enabled(CONFIG_32BIT))
if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -561,7 +561,7 @@ static int ddiv_func(struct pt_regs *regs, u32 ir)
{
s64 rt, rs;
if (config_enabled(CONFIG_32BIT))
if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -586,7 +586,7 @@ static int ddivu_func(struct pt_regs *regs, u32 ir)
{
u64 rt, rs;
if (config_enabled(CONFIG_32BIT))
if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -825,7 +825,7 @@ static int dclz_func(struct pt_regs *regs, u32 ir)
u64 res;
u64 rs;
if (config_enabled(CONFIG_32BIT))
if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
if (!MIPSInst_RD(ir))
@@ -852,7 +852,7 @@ static int dclo_func(struct pt_regs *regs, u32 ir)
u64 res;
u64 rs;
if (config_enabled(CONFIG_32BIT))
if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
if (!MIPSInst_RD(ir))
@@ -1484,7 +1484,7 @@ fpu_emul:
break;
case ldl_op:
if (config_enabled(CONFIG_32BIT)) {
if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -1603,7 +1603,7 @@ fpu_emul:
break;
case ldr_op:
if (config_enabled(CONFIG_32BIT)) {
if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -1722,7 +1722,7 @@ fpu_emul:
break;
case sdl_op:
if (config_enabled(CONFIG_32BIT)) {
if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -1840,7 +1840,7 @@ fpu_emul:
break;
case sdr_op:
if (config_enabled(CONFIG_32BIT)) {
if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -2072,7 +2072,7 @@ fpu_emul:
break;
case lld_op:
if (config_enabled(CONFIG_32BIT)) {
if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -2133,7 +2133,7 @@ fpu_emul:
break;
case scd_op:
if (config_enabled(CONFIG_32BIT)) {
if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -148,7 +148,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
}
/* Setup the VPE to run mips_cps_pm_restore when started again */
if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
return -EINVAL;
@@ -387,7 +387,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
goto out_err;
@@ -165,7 +165,7 @@ static int save_msa_extcontext(void __user *buf)
* should already have been done when handling scalar FP
* context.
*/
BUG_ON(config_enabled(CONFIG_EVA));
BUG_ON(IS_ENABLED(CONFIG_EVA));
err = __put_user(read_msa_csr(), &msa->csr);
err |= _save_msa_all_upper(&msa->wr);
@@ -195,7 +195,7 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size)
unsigned int csr;
int i, err;
if (!config_enabled(CONFIG_CPU_HAS_MSA))
if (!IS_ENABLED(CONFIG_CPU_HAS_MSA))
return SIGSYS;
if (size != sizeof(*msa))
@@ -215,7 +215,7 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size)
* scalar FP context, so FPU & MSA should have already been
* disabled whilst handling scalar FP context.
*/
BUG_ON(config_enabled(CONFIG_EVA));
BUG_ON(IS_ENABLED(CONFIG_EVA));
write_msa_csr(csr);
err |= _restore_msa_all_upper(&msa->wr);
@@ -315,7 +315,7 @@ int protected_save_fp_context(void __user *sc)
* EVA does not have userland equivalents of ldc1 or sdc1, so
* save to the kernel FP context & copy that to userland below.
*/
if (config_enabled(CONFIG_EVA))
if (IS_ENABLED(CONFIG_EVA))
lose_fpu(1);
while (1) {
@@ -378,7 +378,7 @@ int protected_restore_fp_context(void __user *sc)
* disable the FPU here such that the code below simply copies to
* the kernel FP context.
*/
if (config_enabled(CONFIG_EVA))
if (IS_ENABLED(CONFIG_EVA))
lose_fpu(0);
while (1) {
@@ -46,8 +46,8 @@ static unsigned core_vpe_count(unsigned core)
if (threads_disabled)
return 1;
if ((!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
&& (!config_enabled(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
&& (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
return 1;
mips_cm_lock_other(core, 0);
@@ -1025,7 +1025,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
if (config_enabled(CONFIG_EVA)) {
if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadHW(addr, value, res);
else
@@ -1044,7 +1044,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
if (config_enabled(CONFIG_EVA)) {
if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadW(addr, value, res);
else
@@ -1063,7 +1063,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
if (config_enabled(CONFIG_EVA)) {
if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadHWU(addr, value, res);
else
@@ -1131,7 +1131,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
if (config_enabled(CONFIG_EVA)) {
if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
StoreHW(addr, value, res);
else
@@ -1151,7 +1151,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
if (config_enabled(CONFIG_EVA)) {
if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
StoreW(addr, value, res);
else
@@ -784,10 +784,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
*/
static inline int cop1_64bit(struct pt_regs *xcp)
{
if (config_enabled(CONFIG_64BIT) && !config_enabled(CONFIG_MIPS32_O32))
if (IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_MIPS32_O32))
return 1;
else if (config_enabled(CONFIG_32BIT) &&
!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
else if (IS_ENABLED(CONFIG_32BIT) &&
!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
return !test_thread_flag(TIF_32BIT_FPREGS);
@@ -1025,7 +1025,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
pte_off_odd += offsetof(pte_t, pte_high);
#endif
if (config_enabled(CONFIG_XPA)) {
if (IS_ENABLED(CONFIG_XPA)) {
uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
@@ -1643,7 +1643,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
unsigned int swmode = mode & ~hwmode;
if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) {
if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) {
uasm_i_lui(p, scratch, swmode >> 16);
uasm_i_or(p, pte, pte, scratch);
BUG_ON(swmode & 0xffff);
@@ -2432,7 +2432,7 @@ static void config_htw_params(void)
pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
/* Set pointer size to size of directory pointers */
if (config_enabled(CONFIG_64BIT))
if (IS_ENABLED(CONFIG_64BIT))
pwsize |= MIPS_PWSIZE_PS_MASK;
/* PTEs may be multiple pointers long (e.g. with XPA) */
pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
@@ -2448,7 +2448,7 @@ static void config_htw_params(void)
* the pwctl fields.
*/
config = 1 << MIPS_PWCTL_PWEN_SHIFT;
if (config_enabled(CONFIG_64BIT))
if (IS_ENABLED(CONFIG_64BIT))
config |= MIPS_PWCTL_XU_MASK;
write_c0_pwctl(config);
pr_info("Hardware Page Table Walker enabled\n");
@@ -2522,7 +2522,7 @@ void build_tlb_refill_handler(void)
*/
static int run_once = 0;
if (config_enabled(CONFIG_XPA) && !cpu_has_rixi)
if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
panic("Kernels supporting XPA currently require CPUs with RIXI");
output_pgtable_bits_defines();
@@ -31,7 +31,7 @@ static unsigned __init gen_fdt_mem_array(__be32 *mem_array, unsigned long size)
entries = 1;
mem_array[0] = cpu_to_be32(PHYS_OFFSET);
if (config_enabled(CONFIG_EVA)) {
if (IS_ENABLED(CONFIG_EVA)) {
/*
* The current Malta EVA configuration is "special" in that it
* always makes use of addresses in the upper half of the 32 bit
@@ -82,7 +82,7 @@ static void __init append_memory(void *fdt, int root_off)
physical_memsize = 32 << 20;
}
if (config_enabled(CONFIG_CPU_BIG_ENDIAN)) {
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
/*
* SOC-it swaps, or perhaps doesn't swap, when DMA'ing
* the last word of physical memory.
@@ -32,7 +32,7 @@ static void free_init_pages_eva_malta(void *begin, void *end)
void __init fw_meminit(void)
{
bool eva = config_enabled(CONFIG_EVA);
bool eva = IS_ENABLED(CONFIG_EVA);
free_init_pages_eva = eva ? free_init_pages_eva_malta : NULL;
}
@@ -261,7 +261,7 @@ void __init plat_mem_setup(void)
fdt = malta_dt_shim(fdt);
__dt_setup_arch(fdt);
if (config_enabled(CONFIG_EVA))
if (IS_ENABLED(CONFIG_EVA))
/* EVA has already been configured in mach-malta/kernel-init.h */
pr_info("Enhanced Virtual Addressing (EVA) activated\n");
@@ -426,7 +426,7 @@ static inline void emit_load_ptr(unsigned int dst, unsigned int src,
static inline void emit_load_func(unsigned int reg, ptr imm,
struct jit_ctx *ctx)
{
if (config_enabled(CONFIG_64BIT)) {
if (IS_ENABLED(CONFIG_64BIT)) {
/* At this point imm is always 64-bit */
emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
@@ -516,7 +516,7 @@ static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
static inline u16 align_sp(unsigned int num)
{
/* Double word alignment for 32-bit, quadword for 64-bit */
unsigned int align = config_enabled(CONFIG_64BIT) ? 16 : 8;
unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
num = (num + (align - 1)) & -align;
return num;
}
@@ -344,8 +344,8 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
*/
static inline int mmap_is_ia32(void)
{
return config_enabled(CONFIG_X86_32) ||
(config_enabled(CONFIG_COMPAT) &&
return IS_ENABLED(CONFIG_X86_32) ||
(IS_ENABLED(CONFIG_COMPAT) &&
test_thread_flag(TIF_ADDR32));
}
@@ -137,9 +137,9 @@ static inline int copy_fregs_to_user(struct fregs_state __user *fx)
static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
if (config_enabled(CONFIG_X86_32))
if (IS_ENABLED(CONFIG_X86_32))
return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
else if (config_enabled(CONFIG_AS_FXSAVEQ))
else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
/* See comment in copy_fxregs_to_kernel() below. */
@@ -150,10 +150,10 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
int err;
if (config_enabled(CONFIG_X86_32)) {
if (IS_ENABLED(CONFIG_X86_32)) {
err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
if (config_enabled(CONFIG_AS_FXSAVEQ)) {
if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
/* See comment in copy_fxregs_to_kernel() below. */
@@ -166,9 +166,9 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
if (config_enabled(CONFIG_X86_32))
if (IS_ENABLED(CONFIG_X86_32))
return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
else if (config_enabled(CONFIG_AS_FXSAVEQ))
else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
/* See comment in copy_fxregs_to_kernel() below. */
@@ -190,9 +190,9 @@ static inline int copy_user_to_fregs(struct fregs_state __user *fx)
static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
if (config_enabled(CONFIG_X86_32))
if (IS_ENABLED(CONFIG_X86_32))
asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
else if (config_enabled(CONFIG_AS_FXSAVEQ))
else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
else {
/* Using "rex64; fxsave %0" is broken because, if the memory
@@ -155,7 +155,7 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
return !config_enabled(CONFIG_IA32_EMULATION) ||
return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
!(mm->context.ia32_compat == TIF_IA32);
}
#else
@@ -147,7 +147,7 @@ static int force_enable_local_apic __initdata;
*/
static int __init parse_lapic(char *arg)
{
if (config_enabled(CONFIG_X86_32) && !arg)
if (IS_ENABLED(CONFIG_X86_32) && !arg)
force_enable_local_apic = 1;
else if (arg && !strncmp(arg, "notscdeadline", 13))
setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
@@ -523,7 +523,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
struct apic_chip_data *data = irq_data->chip_data;
int err, irq = irq_data->irq;
if (!config_enabled(CONFIG_SMP))
if (!IS_ENABLED(CONFIG_SMP))
return -EPERM;
if (!cpumask_intersects(dest, cpu_online_mask))
@@ -159,8 +159,8 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
struct task_struct *tsk = current;
int ia32_fxstate = (buf != buf_fx);
ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
config_enabled(CONFIG_IA32_EMULATION));
ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
IS_ENABLED(CONFIG_IA32_EMULATION));
if (!access_ok(VERIFY_WRITE, buf, size))
return -EACCES;
@@ -268,8 +268,8 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
u64 xfeatures = 0;
int fx_only = 0;
ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
config_enabled(CONFIG_IA32_EMULATION));
ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
IS_ENABLED(CONFIG_IA32_EMULATION));
if (!buf) {
fpu__clear(fpu);
@@ -416,8 +416,8 @@ void fpu__init_prepare_fx_sw_frame(void)
fx_sw_reserved.xfeatures = xfeatures_mask;
fx_sw_reserved.xstate_size = fpu_user_xstate_size;
if (config_enabled(CONFIG_IA32_EMULATION) ||
config_enabled(CONFIG_X86_32)) {
if (IS_ENABLED(CONFIG_IA32_EMULATION) ||
IS_ENABLED(CONFIG_X86_32)) {
int fsave_header_size = sizeof(struct fregs_state);
fx_sw_reserved_ia32 = fx_sw_reserved;
@@ -146,7 +146,7 @@ static int restore_sigcontext(struct pt_regs *regs,
buf = (void __user *)buf_val;
} get_user_catch(err);
err |= fpu__restore_sig(buf, config_enabled(CONFIG_X86_32));
err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
force_iret();
@@ -245,14 +245,14 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
struct fpu *fpu = &current->thread.fpu;
/* redzone */
if (config_enabled(CONFIG_X86_64))
if (IS_ENABLED(CONFIG_X86_64))