Commit 673ef509 authored by Jan Kiszka

Merge tag 'v4.14.109' into ipipe-x86-4.14.y

This is the 4.14.109 stable release
parents 802a0263 1848c32f
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 103
+SUBLEVEL = 109
 EXTRAVERSION =
 NAME = Petit Gorille
...
@@ -417,6 +417,14 @@ config ARC_HAS_ACCL_REGS
 	  (also referred to as r58:r59). These can also be used by gcc as GPR so
 	  kernel needs to save/restore per process
 
+config ARC_IRQ_NO_AUTOSAVE
+	bool "Disable hardware autosave regfile on interrupts"
+	default n
+	help
+	  On HS cores, a taken interrupt auto saves the regfile on stack.
+	  This is programmable and can be optionally disabled, in which case
+	  software INTERRUPT_PROLOGUE/EPILOGUE do the needed work
+
 endif	# ISA_ARCV2
 
 endmenu	# "ARC CPU Configuration"
...
@@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
 {
 	if (!word)
 		return word;
@@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
 {
-	int n;
+	unsigned long n;
 
 	asm volatile(
 	"	ffs.f	%0, %1		\n"	/* 0:31; 31(Z) if src 0 */
...
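The two hunks above change __ffs()'s return type from int to unsigned long, matching the asm-generic prototype and avoiding type-mismatch build warnings in callers. A minimal userspace sketch of the zero-based semantics the ARC code preserves (the helper name is hypothetical; the real implementation is the inline asm above):

	/* ref_ffs0: hypothetical model of zero-based __ffs(). Mirrors the
	 * ARC quirk above: an input of 0 returns 0 (the asm path returns
	 * the source operand itself when no bit is set). */
	#include <stdio.h>

	static unsigned long ref_ffs0(unsigned long word)
	{
		if (!word)
			return word;	/* no bit set: return input (0) */
		return (unsigned long)__builtin_ctzl(word); /* lowest set bit index */
	}

	int main(void)
	{
		printf("%lu\n", ref_ffs0(0x8UL));	/* 3 */
		printf("%lu\n", ref_ffs0(0x1UL));	/* 0 */
		printf("%lu\n", ref_ffs0(0x0UL));	/* 0 */
		return 0;
	}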
@@ -52,6 +52,17 @@
 #define cache_line_size()	SMP_CACHE_BYTES
 #define ARCH_DMA_MINALIGN	SMP_CACHE_BYTES
 
+/*
+ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
+ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantees runtime 64-bit
+ * alignment for any atomic64_t embedded in the buffer.
+ * Default ARCH_SLAB_MINALIGN is __alignof__(long long), which has a relaxed
+ * value of 4 (and not 8) in the ARC ABI.
+ */
+#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
+#define ARCH_SLAB_MINALIGN	8
+#endif
+
 extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
 extern void read_decode_cache_bcr(void);
...
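To see why the hunk above matters: LLOCKD/SCONDD operate on naturally (8-byte) aligned doublewords, while the ARC ABI only promises 4-byte alignment for long long. A hedged kernel-style sketch (the struct name and fields are hypothetical, not from this patch):

	#include <linux/atomic.h>
	#include <linux/slab.h>

	/* Hypothetical example object embedding a 64-bit atomic. */
	struct pkt_stats {
		atomic64_t bytes;	/* LLOCKD/SCONDD need 8-byte alignment */
		u32 flags;
	};

	static struct pkt_stats *pkt_stats_alloc(void)
	{
		/*
		 * With ARCH_SLAB_MINALIGN left at __alignof__(long long) == 4,
		 * this allocation could land on a 4-byte boundary, and 64-bit
		 * atomics on ->bytes could take an unaligned-access exception;
		 * the #define above raises the floor to 8.
		 */
		return kmalloc(sizeof(struct pkt_stats), GFP_KERNEL);
	}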
@@ -17,6 +17,33 @@
 	;
 	; Now manually save: r12, sp, fp, gp, r25
 
+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+.ifnc \called_from, exception
+	st.as	r9, [sp, -10]	; save r9 in its final stack slot
+	sub	sp, sp, 12	; skip JLI, LDI, EI
+
+	PUSH	lp_count
+	PUSHAX	lp_start
+	PUSHAX	lp_end
+	PUSH	blink
+
+	PUSH	r11
+	PUSH	r10
+
+	sub	sp, sp, 4	; skip r9
+
+	PUSH	r8
+	PUSH	r7
+	PUSH	r6
+	PUSH	r5
+	PUSH	r4
+	PUSH	r3
+	PUSH	r2
+	PUSH	r1
+	PUSH	r0
+.endif
+#endif
+
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
 	PUSH	r59
 	PUSH	r58
@@ -86,6 +113,33 @@
 	POP	r59
 #endif
 
+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+.ifnc \called_from, exception
+	POP	r0
+	POP	r1
+	POP	r2
+	POP	r3
+	POP	r4
+	POP	r5
+	POP	r6
+	POP	r7
+	POP	r8
+	POP	r9
+	POP	r10
+	POP	r11
+
+	POP	blink
+	POPAX	lp_end
+	POPAX	lp_start
+
+	POP	r9
+	mov	lp_count, r9	; LD to lp_count is not allowed
+
+	add	sp, sp, 12	; skip JLI, LDI, EI
+	ld.as	r9, [sp, -10]	; reload r9 which got clobbered
+.endif
+#endif
+
 .endm
 
 /*------------------------------------------------------------------------*/
...
@@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 	 */
 	  "=&r" (tmp), "+r" (to), "+r" (from)
 	:
-	: "lp_count", "lp_start", "lp_end", "memory");
+	: "lp_count", "memory");
 
 	return n;
 }
@@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 	 */
 	  "=&r" (tmp), "+r" (to), "+r" (from)
 	:
-	: "lp_count", "lp_start", "lp_end", "memory");
+	: "lp_count", "memory");
 
 	return n;
 }
@@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
 	"	.previous	\n"
 	: "+r"(d_char), "+r"(res)
 	: "i"(0)
-	: "lp_count", "lp_start", "lp_end", "memory");
+	: "lp_count", "memory");
 
 	return res;
 }
@@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
 	"	.previous	\n"
 	: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
 	: "g"(-EFAULT), "r"(count)
-	: "lp_count", "lp_start", "lp_end", "memory");
+	: "lp_count", "memory");
 
 	return res;
 }
...
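These four hunks make the same fix: LP_START and LP_END are auxiliary loop registers rather than allocatable GPRs, and (as this backport suggests) listing them in asm clobbers trips up newer ARC toolchains, so only "lp_count" and "memory" remain. A hedged sketch of the resulting shape, with a hypothetical zero-overhead-loop body (ARC gcc only, assumes n > 0):

	/* zol_delay: hypothetical helper busy-waiting n iterations in a
	 * zero-overhead loop. LP_COUNT is written, so it is clobbered;
	 * the LP_START/LP_END aux registers set up by "lp" must NOT be
	 * listed. */
	static inline void zol_delay(unsigned long n)
	{
		__asm__ __volatile__(
		"	mov	lp_count, %0	\n"
		"	lp	1f		\n"	/* loop until label 1f */
		"	nop			\n"
		"1:				\n"
		:
		: "r"(n)
		: "lp_count", "memory");
	}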
@@ -209,7 +209,9 @@ restore_regs:
 ;####### Return from Intr #######
 
 debug_marker_l1:
-	bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+	btst	r0, STATUS_DE_BIT	; Z flag set if bit clear
+	bnz	.Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set
 
 .Lisr_ret_fast_path:
 	; Handle special case #1: (Entry via Exception, Return via IRQ)
...
@@ -17,6 +17,7 @@
 #include <asm/entry.h>
 #include <asm/arcregs.h>
 #include <asm/cache.h>
+#include <asm/irqflags.h>
 
 .macro CPU_EARLY_SETUP
 
@@ -47,6 +48,15 @@
 	sr	r5, [ARC_REG_DC_CTRL]
 
 1:
+
+#ifdef CONFIG_ISA_ARCV2
+	; Unaligned access is disabled at reset, so re-enable early as
+	; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
+	; by default
+	lr	r5, [status32]
+	bset	r5, r5, STATUS_AD_BIT
+	kflag	r5
+#endif
 .endm
 
 	.section .init.text, "ax",@progbits
@@ -93,9 +103,9 @@ ENTRY(stext)
 #ifdef CONFIG_ARC_UBOOT_SUPPORT
 	; Uboot - kernel ABI
 	;    r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
-	;    r1 = magic number (board identity, unused as of now
+	;    r1 = magic number (always zero as of now)
 	;    r2 = pointer to uboot provided cmdline or external DTB in mem
-	; These are handled later in setup_arch()
+	; These are handled later in handle_uboot_args()
 	st	r0, [@uboot_tag]
 	st	r2, [@uboot_arg]
 #endif
...
@@ -49,11 +49,13 @@ void arc_init_IRQ(void)
 	*(unsigned int *)&ictrl = 0;
 
+#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
 	ictrl.save_nr_gpr_pairs = 6;	/* r0 to r11 (r12 saved manually) */
 	ictrl.save_blink = 1;
 	ictrl.save_lp_regs = 1;		/* LP_COUNT, LP_START, LP_END */
 	ictrl.save_u_to_u = 0;		/* user ctxt saved on kernel stack */
 	ictrl.save_idx_regs = 1;	/* JLI, LDI, EI */
+#endif
 
 	WRITE_AUX(AUX_IRQ_CTRL, ictrl);
...
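For context, ictrl here is a bitfield view of the AUX_IRQ_CTRL register, which tells the core what to auto-save on a taken interrupt; with ARC_IRQ_NO_AUTOSAVE the register stays zeroed and the INTERRUPT_PROLOGUE/EPILOGUE additions above do the saving in software. A sketch of the layout, reconstructed only from the fields used in this hunk (bit positions are assumptions; the arch headers hold the authoritative definition):

	/* Assumed little-endian layout; check the kernel's irqflags header
	 * for the real struct. */
	struct aux_irq_ctrl_sketch {
		unsigned int save_nr_gpr_pairs:5; /* 6 => r0..r11 as 6 pairs */
		unsigned int res:4;
		unsigned int save_blink:1;
		unsigned int save_lp_regs:1;	/* LP_COUNT, LP_START, LP_END */
		unsigned int save_u_to_u:1;	/* 0: user ctxt to kernel stack */
		unsigned int res2:1;
		unsigned int save_idx_regs:1;	/* JLI, LDI, EI */
		unsigned int res3:18;
	};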
@@ -414,43 +414,80 @@ void setup_processor(void)
 	arc_chk_core_config();
 }
 
-static inline int is_kernel(unsigned long addr)
+static inline bool uboot_arg_invalid(unsigned long addr)
 {
-	if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
-		return 1;
-	return 0;
+	/*
+	 * Check that it is an untranslated address (although MMU is not
+	 * enabled yet, it being a high address ensures this is not by fluke)
+	 */
+	if (addr < PAGE_OFFSET)
+		return true;
+
+	/* Check that address doesn't clobber resident kernel image */
+	return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
 }
 
-void __init setup_arch(char **cmdline_p)
+#define IGNORE_ARGS		"Ignore U-boot args: "
+
+/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
+#define UBOOT_TAG_NONE		0
+#define UBOOT_TAG_CMDLINE	1
+#define UBOOT_TAG_DTB		2
+
+void __init handle_uboot_args(void)
 {
+	bool use_embedded_dtb = true;
+	bool append_cmdline = false;
+
 #ifdef CONFIG_ARC_UBOOT_SUPPORT
-	/* make sure that uboot passed pointer to cmdline/dtb is valid */
-	if (uboot_tag && is_kernel((unsigned long)uboot_arg))
-		panic("Invalid uboot arg\n");
+	/* check that we know this tag */
+	if (uboot_tag != UBOOT_TAG_NONE &&
+	    uboot_tag != UBOOT_TAG_CMDLINE &&
+	    uboot_tag != UBOOT_TAG_DTB) {
+		pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
+		goto ignore_uboot_args;
+	}
+
+	if (uboot_tag != UBOOT_TAG_NONE &&
+	    uboot_arg_invalid((unsigned long)uboot_arg)) {
+		pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
+		goto ignore_uboot_args;
+	}
+
+	/* see if U-boot passed an external Device Tree blob */
+	if (uboot_tag == UBOOT_TAG_DTB) {
+		machine_desc = setup_machine_fdt((void *)uboot_arg);
 
-	/* See if u-boot passed an external Device Tree blob */
-	machine_desc = setup_machine_fdt(uboot_arg);	/* uboot_tag == 2 */
-	if (!machine_desc)
+		/* external Device Tree blob is invalid - use embedded one */
+		use_embedded_dtb = !machine_desc;
+	}
+
+	if (uboot_tag == UBOOT_TAG_CMDLINE)
+		append_cmdline = true;
+
+ignore_uboot_args:
 #endif
-	{
-		/* No, so try the embedded one */
+
+	if (use_embedded_dtb) {
 		machine_desc = setup_machine_fdt(__dtb_start);
 		if (!machine_desc)
 			panic("Embedded DT invalid\n");
+	}
 
-		/*
-		 * If we are here, it is established that @uboot_arg didn't
-		 * point to DT blob. Instead if u-boot says it is cmdline,
-		 * append to embedded DT cmdline.
-		 * setup_machine_fdt() would have populated @boot_command_line
-		 */
-		if (uboot_tag == 1) {
-			/* Ensure a whitespace between the 2 cmdlines */
-			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
-			strlcat(boot_command_line, uboot_arg,
-				COMMAND_LINE_SIZE);
-		}
-	}
+	/*
+	 * NOTE: @boot_command_line is populated by setup_machine_fdt() so
+	 * this append processing can only happen after.
+	 */
+	if (append_cmdline) {
+		/* Ensure a whitespace between the 2 cmdlines */
+		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+		strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
+	}
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+	handle_uboot_args();
 
 	/* Save unparsed command line copy for /proc/cmdline */
 	*cmdline_p = boot_command_line;
...
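The refactor above replaces the old panic() with a warn-and-ignore policy: unknown tags and bad pointers are logged and the embedded DTB is used instead. A hypothetical userspace mock of uboot_arg_invalid()'s two checks (the PAGE_OFFSET value and the image bounds are assumptions standing in for the kernel's linker symbols):

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_OFFSET	0x80000000UL	/* assumed ARC default */

	/* Mock of the kernel's uboot_arg_invalid(): stext/end stand in
	 * for the linker symbols bounding the kernel image. */
	static bool arg_invalid(unsigned long addr, unsigned long stext,
				unsigned long end)
	{
		if (addr < PAGE_OFFSET)
			return true;			/* translated/low address */
		return addr >= stext && addr <= end;	/* overlaps kernel image */
	}

	int main(void)
	{
		unsigned long stext = 0x80000000UL, end = 0x80800000UL;

		printf("%d\n", arg_invalid(0x00001000UL, stext, end)); /* 1: too low */
		printf("%d\n", arg_invalid(0x80400000UL, stext, end)); /* 1: in image */
		printf("%d\n", arg_invalid(0x90000000UL, stext, end)); /* 0: valid */
		return 0;
	}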
@@ -25,15 +25,11 @@
 #endif
 
 #ifdef CONFIG_ARC_HAS_LL64
-# define PREFETCH_READ(RX)	prefetch    [RX, 56]
-# define PREFETCH_WRITE(RX)	prefetchw   [RX, 64]
 # define LOADX(DST,RX)		ldd.ab	DST, [RX, 8]
 # define STOREX(SRC,RX)		std.ab	SRC, [RX, 8]
 # define ZOLSHFT		5
 # define ZOLAND			0x1F
 #else
-# define PREFETCH_READ(RX)	prefetch    [RX, 28]
-# define PREFETCH_WRITE(RX)	prefetchw   [RX, 32]
 # define LOADX(DST,RX)		ld.ab	DST, [RX, 4]
 # define STOREX(SRC,RX)		st.ab	SRC, [RX, 4]
 # define ZOLSHFT		4
@@ -41,8 +37,6 @@
 #endif
 
 ENTRY_CFI(memcpy)
-	prefetch [r1]		; Prefetch the read location
-	prefetchw [r0]		; Prefetch the write location
 	mov.f	0, r2
 ;;; if size is zero
 	jz.d	[blink]
@@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
 	lpnz	@.Lcopy32_64bytes
 	;; LOOP START
 	LOADX (r6, r1)
-	PREFETCH_READ (r1)
-	PREFETCH_WRITE (r3)
 	LOADX (r8, r1)
 	LOADX (r10, r1)
 	LOADX (r4, r1)
@@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
 	lpnz	@.Lcopy8bytes_1
 	;; LOOP START
 	ld.ab	r6, [r1, 4]
-	prefetch [r1, 28]	;Prefetch the next read location
 	ld.ab	r8, [r1,4]
-	prefetchw [r3, 32]	;Prefetch the next write location