Commit fa5fd7c6 authored by Linus Torvalds

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:
 "Here is the core arm64 queue for 4.5.  As you might expect, the
  Christmas break resulted in a number of patches not making the final
  cut, so 4.6 is likely to be larger than usual.  There's still some
  useful stuff here, however, and it's detailed below.

  The EFI changes have been Reviewed-by Matt and the memblock change got
  an "OK" from akpm.

  Summary:

   - Support for a separate IRQ stack, although we haven't reduced the
     size of our thread stack just yet since we don't have enough data
     to determine a safe value

   - Refactoring of our EFI initialisation and runtime code into
     drivers/firmware/efi/ so that it can be reused by arch/arm/.

   - Ftrace improvements when unwinding in the function graph tracer

   - Document our silicon errata handling process

   - Cache flushing optimisation when mapping executable pages

   - Support for hugetlb mappings using the contiguous hint in the pte"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (45 commits)
  arm64: head.S: use memset to clear BSS
  efi: stub: define DISABLE_BRANCH_PROFILING for all architectures
  arm64: entry: remove pointless SPSR mode check
  arm64: mm: move pgd_cache initialisation to pgtable_cache_init
  arm64: module: avoid undefined shift behavior in reloc_data()
  arm64: module: fix relocation of movz instruction with negative immediate
  arm64: traps: address fallout from printk -> pr_* conversion
  arm64: ftrace: fix a stack tracer's output under function graph tracer
  arm64: pass a task parameter to unwind_frame()
  arm64: ftrace: modify a stack frame in a safe way
  arm64: remove irq_count and do_softirq_own_stack()
  arm64: hugetlb: add support for PTE contiguous bit
  arm64: Use PoU cache instr for I/D coherency
  arm64: Defer dcache flush in __cpu_copy_user_page
  arm64: reduce stack use in irq_handler
  arm64: mm: ensure that the zero page is visible to the page table walker
  arm64: Documentation: add list of software workarounds for errata
  arm64: mm: place __cpu_setup in .text
  arm64: cmpxchg: Don't incldue linux/mmdebug.h
  arm64: mm: fold alternatives into .init
  ...
parents 19e2fc40 2a803c4d

Silicon Errata and Software Workarounds
=======================================

Author: Will Deacon <will.deacon@arm.com>
Date  : 27 November 2015

It is an unfortunate fact of life that hardware is often produced with
so-called "errata", which can cause it to deviate from the architecture
under specific circumstances. For hardware produced by ARM, these
errata are broadly classified into the following categories:

  Category A: A critical error without a viable workaround.
  Category B: A significant or critical error with an acceptable
              workaround.
  Category C: A minor error that is not expected to occur under normal
              operation.

For more information, consult one of the "Software Developers Errata
Notice" documents available on infocenter.arm.com (registration
required).

As far as Linux is concerned, Category B errata may require some special
treatment in the operating system. For example, avoiding a particular
sequence of code, or configuring the processor in a particular way. A
less common situation may require similar actions in order to declassify
a Category A erratum into a Category C erratum. These are collectively
known as "software workarounds" and are only required in the minority of
cases (e.g. those cases that both require a non-secure workaround *and*
can be triggered by Linux).

For software workarounds that may adversely impact systems unaffected by
the erratum in question, a Kconfig entry is added under "Kernel
Features" -> "ARM errata workarounds via the alternatives framework".
These are enabled by default and patched in at runtime when an affected
CPU is detected. For less-intrusive workarounds, a Kconfig option is not
available and the code is structured (preferably with a comment) in such
a way that the erratum will not be hit.

This approach can make it slightly onerous to determine exactly which
errata are worked around in an arbitrary kernel source tree, so this
file acts as a registry of software workarounds in the Linux Kernel and
will be updated when new workarounds are committed and backported to
stable kernels.

| Implementor    | Component       | Erratum ID      | Kconfig                 |
+----------------+-----------------+-----------------+-------------------------+
| ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319    |
| ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319    |
| ARM            | Cortex-A53      | #824069         | ARM64_ERRATUM_824069    |
| ARM            | Cortex-A53      | #819472         | ARM64_ERRATUM_819472    |
| ARM            | Cortex-A53      | #845719         | ARM64_ERRATUM_845719    |
| ARM            | Cortex-A53      | #843419         | ARM64_ERRATUM_843419    |
| ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075    |
| ARM            | Cortex-A57      | #852523         | N/A                     |
| ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220    |
|                |                 |                 |                         |
| Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
| Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
......@@ -9,7 +9,7 @@
| alpha: | .. |
| arc: | TODO |
| arm: | ok |
| arm64: | .. |
| arm64: | ok |
| avr32: | TODO |
| blackfin: | TODO |
| c6x: | TODO |
......
......@@ -70,6 +70,7 @@ config ARM64
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MEMBLOCK
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
......@@ -529,9 +530,6 @@ config HW_PERF_EVENTS
config SYS_SUPPORTS_HUGETLBFS
def_bool y
config ARCH_WANT_GENERAL_HUGETLB
def_bool y
config ARCH_WANT_HUGE_PMD_SHARE
def_bool y if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
......
......@@ -19,7 +19,6 @@ struct alt_instr {
void __init apply_alternatives_all(void);
void apply_alternatives(void *start, size_t length);
void free_alternatives_memory(void);
#define ALTINSTR_ENTRY(feature) \
" .word 661b - .\n" /* label */ \
......
......@@ -193,6 +193,17 @@ lr .req x30 // link register
str \src, [\tmp, :lo12:\sym]
.endm
/*
* @sym: The name of the per-cpu variable
* @reg: Result of per_cpu(sym, smp_processor_id())
* @tmp: scratch register
*/
.macro this_cpu_ptr, sym, reg, tmp
adr_l \reg, \sym
mrs \tmp, tpidr_el1
add \reg, \reg, \tmp
.endm
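
For readers unfamiliar with the per-cpu scheme this macro relies on, here is a
minimal user-space sketch (not kernel code) of the same arithmetic: the
symbol's base address plus a per-CPU offset, which the kernel keeps in
TPIDR_EL1, yields that CPU's private copy. The array names below are made up
purely for illustration.

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 4

static long base_var;                    /* stand-in for the per-cpu symbol */
static long percpu_copies[NR_CPUS];      /* each CPU's private copy */
static uintptr_t percpu_offset[NR_CPUS]; /* stand-in for TPIDR_EL1 */

int main(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        percpu_offset[cpu] = (uintptr_t)&percpu_copies[cpu] - (uintptr_t)&base_var;

    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        /* adr_l reg, sym ; mrs tmp, tpidr_el1 ; add reg, reg, tmp */
        long *p = (long *)((uintptr_t)&base_var + percpu_offset[cpu]);

        *p = 10 * cpu;
        printf("cpu%d copy at %p holds %ld\n", cpu, (void *)p, *p);
    }
    return 0;
}
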
/*
* Annotate a function as position independent, i.e., safe to be called before
* the kernel virtual mapping is activated.
......
......@@ -68,6 +68,7 @@
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
static inline void flush_cache_mm(struct mm_struct *mm)
......
......@@ -19,7 +19,6 @@
#define __ASM_CMPXCHG_H
#include <linux/bug.h>
#include <linux/mmdebug.h>
#include <asm/atomic.h>
#include <asm/barrier.h>
......
......@@ -2,7 +2,9 @@
#define _ASM_EFI_H
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/neon.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_EFI
extern void efi_init(void);
......@@ -10,6 +12,8 @@ extern void efi_init(void);
#define efi_init()
#endif
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
#define efi_call_virt(f, ...) \
({ \
efi_##f##_t *__f; \
......@@ -63,6 +67,11 @@ extern void efi_init(void);
* Services are enabled and the EFI_RUNTIME_SERVICES bit set.
*/
static inline void efi_set_pgd(struct mm_struct *mm)
{
switch_mm(NULL, mm, NULL);
}
void efi_virtmap_load(void);
void efi_virtmap_unload(void);
......
......@@ -28,6 +28,8 @@ struct dyn_arch_ftrace {
extern unsigned long ftrace_graph_call;
extern void return_to_handler(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/*
......
......@@ -26,36 +26,7 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return *ptep;
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
set_pte_at(mm, addr, ptep, pte);
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
ptep_clear_flush(vma, addr, ptep);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
ptep_set_wrprotect(mm, addr, ptep);
}
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return ptep_get_and_clear(mm, addr, ptep);
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
......@@ -97,4 +68,19 @@ static inline void arch_clear_hugepage_flags(struct page *page)
clear_bit(PG_dcache_clean, &page->flags);
}
extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
struct page *page, int writable);
#define arch_make_huge_pte arch_make_huge_pte
extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty);
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
extern void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
#endif /* __ASM_HUGETLB_H */
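
The helpers above move out of line because a hugetlb mapping built from the
contiguous hint spans several ordinary entries rather than one block entry.
As a rough user-space illustration only (not the kernel's implementation, and
with the bit positions hard-coded as assumptions), the sketch below writes a
run of CONT_PTES fake entries with a contiguous hint bit set:

#include <stdio.h>
#include <stdint.h>

#define CONT_PTES  16             /* 4K granule: 16 x 4K = 64K */
#define PTE_VALID  (1ULL << 0)
#define PTE_CONT   (1ULL << 52)   /* contiguous hint bit (assumed position) */

static uint64_t fake_pte_table[CONT_PTES];

static void set_cont_range(uint64_t pfn)
{
    /* every entry in the range carries the hint, so the TLB may cache it as one */
    for (int i = 0; i < CONT_PTES; i++, pfn++)
        fake_pte_table[i] = (pfn << 12) | PTE_CONT | PTE_VALID;
}

int main(void)
{
    set_cont_range(0x1000);
    for (int i = 0; i < CONT_PTES; i++)
        printf("pte[%2d] = %#llx\n", i, (unsigned long long)fake_pte_table[i]);
    return 0;
}
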
#ifndef __ASM_IRQ_H
#define __ASM_IRQ_H
#define IRQ_STACK_SIZE THREAD_SIZE
#define IRQ_STACK_START_SP THREAD_START_SP
#ifndef __ASSEMBLER__
#include <linux/percpu.h>
#include <asm-generic/irq.h>
#include <asm/thread_info.h>
struct pt_regs;
DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
/*
* The highest address on the stack, and the first to be used. Used to
* find the dummy-stack frame put down by el?_irq() in entry.S, which
* is structured as follows:
*
* ------------
* | | <- irq_stack_ptr
* top ------------
* | x19 | <- irq_stack_ptr - 0x08
* ------------
* | x29 | <- irq_stack_ptr - 0x10
* ------------
*
* where x19 holds a copy of the task stack pointer where the struct pt_regs
* from kernel_entry can be found.
*
*/
#define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
/*
* The offset from irq_stack_ptr where entry.S will store the original
* stack pointer. Used by unwind_frame() and dump_backtrace().
*/
#define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
static inline int nr_legacy_irqs(void)
......@@ -12,4 +47,14 @@ static inline int nr_legacy_irqs(void)
return 0;
}
static inline bool on_irq_stack(unsigned long sp, int cpu)
{
/* variable names the same as kernel/stacktrace.c */
unsigned long low = (unsigned long)per_cpu(irq_stack, cpu);
unsigned long high = low + IRQ_STACK_START_SP;
return (low <= sp && sp <= high);
}
#endif /* !__ASSEMBLER__ */
#endif
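
A minimal user-space sketch (not kernel code) of the layout described in the
comment above: the word at irq_stack_ptr - 0x08 holds the task stack pointer
saved by entry.S, and on_irq_stack() is just a bounds check against the
per-cpu stack. The constants are hard-coded here, assuming THREAD_SIZE of 16K
and THREAD_START_SP of THREAD_SIZE - 16.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define THREAD_SIZE         16384
#define IRQ_STACK_SIZE      THREAD_SIZE
#define IRQ_STACK_START_SP  (THREAD_SIZE - 16)

static unsigned long irq_stack[IRQ_STACK_SIZE / sizeof(long)];

#define IRQ_STACK_PTR() ((unsigned long)irq_stack + IRQ_STACK_START_SP)
#define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))

static bool on_irq_stack(unsigned long sp)
{
    unsigned long low = (unsigned long)irq_stack;
    unsigned long high = low + IRQ_STACK_START_SP;

    return low <= sp && sp <= high;
}

int main(void)
{
    unsigned long task_sp = 0x12345670UL;   /* made-up task stack pointer */
    unsigned long ptr = IRQ_STACK_PTR();

    /* entry.S would store x19 (the task SP) just below the stack top */
    *(unsigned long *)(ptr - 0x08) = task_sp;

    printf("on_irq_stack(ptr) = %d\n", on_irq_stack(ptr));
    printf("recovered task sp = 0x%lx\n", IRQ_STACK_TO_TASK_STACK(ptr));
    return 0;
}
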
......@@ -90,7 +90,23 @@
/*
* Contiguous page definitions.
*/
#define CONT_PTES (_AC(1, UL) << CONT_SHIFT)
#ifdef CONFIG_ARM64_64K_PAGES
#define CONT_PTE_SHIFT 5
#define CONT_PMD_SHIFT 5
#elif defined(CONFIG_ARM64_16K_PAGES)
#define CONT_PTE_SHIFT 7
#define CONT_PMD_SHIFT 5
#else
#define CONT_PTE_SHIFT 4
#define CONT_PMD_SHIFT 4
#endif
#define CONT_PTES (1 << CONT_PTE_SHIFT)
#define CONT_PTE_SIZE (CONT_PTES * PAGE_SIZE)
#define CONT_PTE_MASK (~(CONT_PTE_SIZE - 1))
#define CONT_PMDS (1 << CONT_PMD_SHIFT)
#define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
#define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1))
/* the numerical offset of the PTE within a range of CONT_PTES */
#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))
......
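
To make the numbers above concrete, here is a small user-space sketch (not
kernel code) of the 4K-granule case only: CONT_PTE_SHIFT of 4 gives 16
contiguous PTEs, i.e. a 64KB range, and CONT_RANGE_OFFSET picks out an
address's slot within that range. The example address is arbitrary.

#include <stdio.h>

#define PAGE_SHIFT      12                      /* 4K pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define CONT_PTE_SHIFT  4                       /* 16 contiguous PTEs */
#define CONT_PTES       (1UL << CONT_PTE_SHIFT)
#define CONT_PTE_SIZE   (CONT_PTES * PAGE_SIZE)
#define CONT_PTE_MASK   (~(CONT_PTE_SIZE - 1))

#define CONT_RANGE_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (CONT_PTES - 1))

int main(void)
{
    unsigned long addr = 0x40007000UL;          /* arbitrary example address */

    printf("contiguous range: %lu PTEs = %lu KB\n",
           CONT_PTES, CONT_PTE_SIZE / 1024);
    printf("range base of 0x%lx: 0x%lx\n", addr, addr & CONT_PTE_MASK);
    printf("index within range: %lu\n", CONT_RANGE_OFFSET(addr));
    return 0;
}
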
......@@ -167,6 +167,16 @@ extern struct page *empty_zero_page;
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
/*
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
* so that we don't erroneously return false for pages that have been
* remapped as PROT_NONE but are yet to be flushed from the TLB.
*/
#define pte_accessible(mm, pte) \
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
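
As an aside, a user-space sketch (not kernel code) of the check above: the
PTE_VALID and PTE_AF positions are hard-coded to arm64's usual bits as an
assumption, and the mm argument is reduced to a boolean, purely for
illustration.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PTE_VALID  (1UL << 0)
#define PTE_AF     (1UL << 10)    /* Access Flag */

static bool pte_valid_young(uint64_t pte)
{
    return (pte & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF);
}

static bool pte_accessible(bool tlb_flush_pending, uint64_t pte, bool present)
{
    /* with a flush pending, a present pte may still sit in the TLB */
    return tlb_flush_pending ? present : pte_valid_young(pte);
}

int main(void)
{
    uint64_t young = PTE_VALID | PTE_AF;
    uint64_t old   = PTE_VALID;

    printf("young pte accessible:    %d\n", pte_accessible(false, young, true));
    printf("old pte accessible:      %d\n", pte_accessible(false, old, true));
    printf("old pte, flush pending:  %d\n", pte_accessible(true, old, true));
    return 0;
}
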
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
......@@ -217,7 +227,8 @@ static inline pte_t pte_mkspecial(pte_t pte)
static inline pte_t pte_mkcont(pte_t pte)
{
return set_pte_bit(pte, __pgprot(PTE_CONT));
pte = set_pte_bit(pte, __pgprot(PTE_CONT));
return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}
static inline pte_t pte_mknoncont(pte_t pte)
......@@ -225,6 +236,11 @@ static inline pte_t pte_mknoncont(pte_t pte)
return clear_pte_bit(pte, __pgprot(PTE_CONT));
}
static inline pmd_t pmd_mkcont(pmd_t pmd)
{
return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
*ptep = pte;
......@@ -298,7 +314,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
/*
* Hugetlb definitions.
*/
#define HUGE_MAX_HSTATE 2
#define HUGE_MAX_HSTATE 4
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
......@@ -664,7 +680,8 @@ extern int kern_addr_valid(unsigned long addr);
#include <asm-generic/pgtable.h>
#define pgtable_cache_init() do { } while (0)
void pgd_cache_init(void);
#define pgtable_cache_init pgd_cache_init
/*
* On AArch64, the cache coherency is handled via the set_pte_at() function.
......
......@@ -21,7 +21,7 @@
* alignment value. Since we don't have aliasing D-caches, the rest of
* the time we can safely use PAGE_SIZE.
*/
#define COMPAT_SHMLBA 0x4000
#define COMPAT_SHMLBA (4 * PAGE_SIZE)
#include <asm-generic/shmparam.h>
......
......@@ -26,9 +26,28 @@
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
*/
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
unsigned int tmp;
arch_spinlock_t lockval;
#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
asm volatile(
" sevl\n"
"1: wfe\n"
"2: ldaxr %w0, %2\n"
" eor %w1, %w0, %w0, ror #16\n"
" cbnz %w1, 1b\n"
ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
" stxr %w1, %w0, %2\n"
" cbnz %w1, 2b\n", /* Serialise against any concurrent lockers */
/* LSE atomics */
" nop\n"
" nop\n")
: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
:
: "memory");
}
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
......
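
The eor with a 16-bit rotation above tests whether the two halves of the
32-bit ticket lock (owner and next) are equal, i.e. whether the lock is free.
Below is a user-space sketch (not kernel code) of just that check, assuming
owner sits in the low half and next in the high half of the word:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool ticket_lock_is_locked(uint32_t lockval)
{
    /* equivalent of: eor tmp, lockval, lockval, ror #16 ; tmp != 0 */
    uint32_t rotated = (lockval >> 16) | (lockval << 16);

    return (lockval ^ rotated) != 0;
}

int main(void)
{
    uint32_t free_lock = (3u << 16) | 3u;  /* owner == next: unlocked */
    uint32_t held_lock = (4u << 16) | 3u;  /* next one ahead of owner: held */

    printf("owner=3 next=3 -> locked? %d\n", ticket_lock_is_locked(free_lock));
    printf("owner=3 next=4 -> locked? %d\n", ticket_lock_is_locked(held_lock));
    return 0;
}
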
......@@ -16,14 +16,19 @@
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H
struct task_struct;
struct stackframe {
unsigned long fp;
unsigned long sp;
unsigned long pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
unsigned int graph;
#endif
};
extern int unwind_frame(struct stackframe *frame);
extern void walk_stackframe(struct stackframe *frame,
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
#endif /* __ASM_STACKTRACE_H */
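
For context on the unwind_frame()/walk_stackframe() signatures above, the
sketch below is a user-space illustration (not the kernel's implementation) of
an arm64 frame-record walk: each record stores the caller's frame pointer at
[fp] and the return address at [fp + 8]. The records and addresses are
fabricated.

#include <stdio.h>
#include <stdint.h>

struct frame_record {
    uint64_t next_fp;   /* [fp]     : caller's frame pointer */
    uint64_t lr;        /* [fp + 8] : return address */
};

int main(void)
{
    /* fabricated three-deep call chain: leaf -> middle -> outer */
    struct frame_record outer  = { 0, 0x4002a0 };   /* next_fp == 0 ends the walk */
    struct frame_record middle = { (uint64_t)(uintptr_t)&outer,  0x400200 };
    struct frame_record leaf   = { (uint64_t)(uintptr_t)&middle, 0x400150 };

    uint64_t fp = (uint64_t)(uintptr_t)&leaf;

    while (fp) {
        struct frame_record *rec = (struct frame_record *)(uintptr_t)fp;

        printf("fp=%#llx pc=%#llx\n",
               (unsigned long long)fp, (unsigned long long)rec->lr);
        fp = rec->next_fp;  /* step out to the caller's record */
    }
    return 0;
}
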
......@@ -73,10 +73,16 @@ register unsigned long current_stack_pointer asm ("sp");
*/
static inline struct thread_info *current_thread_info(void) __attribute_const__;
/*
* struct thread_info can be accessed directly via sp_el0.
*/
static inline struct thread_info *current_thread_info(void)
{
return (struct thread_info *)
(current_stack_pointer & ~(THREAD_SIZE - 1));
unsigned long sp_el0;
asm ("mrs %0, sp_el0" : "=r" (sp_el0));
return (struct thread_info *)sp_el0;
}
#define thread_saved_pc(tsk) \
......
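
The removed lines above were the old stack-masking lookup. As a user-space
sketch (not kernel code) of that trick: thread_info lives at the base of a
THREAD_SIZE-aligned kernel stack, so masking any in-stack address recovers it.
The new sp_el0-based lookup cannot be demonstrated outside the kernel.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define THREAD_SIZE 16384UL

struct thread_info { int cpu; };

int main(void)
{
    /* a THREAD_SIZE-aligned "kernel stack" with thread_info at its base */
    void *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
    struct thread_info *ti;
    if (!stack)
        return 1;

    ti = stack;
    ti->cpu = 2;

    /* pretend the stack pointer is somewhere in the middle of the stack */
    uintptr_t sp = (uintptr_t)stack + THREAD_SIZE - 256;
    struct thread_info *found =
        (struct thread_info *)(sp & ~(THREAD_SIZE - 1));

    printf("thread_info at %p, recovered %p, cpu=%d\n",
           (void *)ti, (void *)found, found->cpu);
    free(stack);
    return 0;
}
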
......@@ -158,9 +158,3 @@ void apply_alternatives(void *start, size_t length)
__apply_alternatives(&region);
}
void free_alternatives_memory(void)
{
free_reserved_area(__alt_instructions, __alt_instructions_end,
0, "alternatives");
}
......@@ -62,7 +62,7 @@ struct insn_emulation {
};
static LIST_HEAD(insn_emulation);
static int nr_insn_emulated;
static int nr_insn_emulated __initdata;
static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
static void register_emulation_hooks(struct insn_emulation_ops *ops)
......@@ -173,7 +173,7 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
return ret;
}
static void register_insn_emulation(struct insn_emulation_ops *ops)
static void __init register_insn_emulation(struct insn_emulation_ops *ops)
{
unsigned long flags;
struct insn_emulation *insn;
......@@ -237,7 +237,7 @@ static struct ctl_table ctl_abi[] = {
{ }
};
static void register_insn_emulation_sysctl(struct ctl_table *table)
static void __init register_insn_emulation_sysctl(struct ctl_table *table)
{
unsigned long flags;
int i = 0;
......
......@@ -684,7 +684,7 @@ static const struct arm64_cpu_capabilities arm64_hwcaps[] = {
{},
};
static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
static void __init cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
{
switch (cap->hwcap_type) {
case CAP_HWCAP:
......@@ -729,7 +729,7 @@ static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *
return rc;
}
static void setup_cpu_hwcaps(void)
static void __init setup_cpu_hwcaps(void)
{
int i;
const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;
......@@ -758,7 +758,8 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
* Run through the enabled capabilities and enable() it on all active
* CPUs
*/
static void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
static void __init
enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
int i;
......@@ -897,7 +898,7 @@ static inline void set_sys_caps_initialised(void)
#endif /* CONFIG_HOTPLUG_CPU */
static void setup_feature_capabilities(void)
static void __init setup_feature_capabilities(void)
{
update_cpu_capabilities(arm64_features, "detected feature:");
enable_cpu_capabilities(arm64_features);
......
......@@ -11,317 +11,34 @@
*
*/
#include <linux/atomic.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/preempt.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
struct efi_memory_map memmap;
static u64 efi_system_table;
static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
static struct mm_struct efi_mm = {
.mm_rb = RB_ROOT,
.pgd = efi_pgd,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
.mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
};
static int __init is_normal_ram(efi_memory_desc_t *md)
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
if (md->attribute & EFI_MEMORY_WB)
return 1;
return 0;
}
/*
* Translate an EFI virtual address into a physical address: this is necessary,
* as some data members of the EFI system table are virtually remapped after
* SetVirtualAddressMap() has been called.
*/
static phys_addr_t efi_to_phys(unsigned long addr)
{
efi_memory_desc_t *md;
for_each_efi_memory_desc(&memmap, md) {
if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
if (md->virt_addr == 0)
/* no virtual mapping has been installed by the stub */
break;
if (md->virt_addr <= addr &&
(addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
return md->phys_addr + addr - md->virt_addr;
}
return addr;
}
static int __init uefi_init(void)
{
efi_char16_t *c16;
void *config_tables;
u64 table_size;
char vendor[100] = "unknown";
int i, retval;
efi.systab = early_memremap(efi_system_table,
sizeof(efi_system_table_t));
if (efi.systab == NULL) {
pr_warn("Unable to map EFI system table.\n");
return -ENOMEM;
}
set_bit(EFI_BOOT, &efi.flags);
set_bit(EFI_64BIT, &efi.flags);
/*
* Verify the EFI Table
*/
if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
pr_err("System table signature incorrect\n");
retval = -EINVAL;
goto out;
}
if ((efi.systab->hdr.revision >> 16) < 2)
pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff);
/* Show what we know for posterity */
c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
sizeof(vendor) * sizeof(efi_char16_t));
if (c16) {
for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
vendor[i] = c16[i];
vendor[i] = '\0';
early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
}
pr_info("EFI v%u.%.02u by %s\n",
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);
table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
config_tables = early_memremap(efi_to_phys(efi.systab->tables),
table_size);
if (config_tables == NULL) {
pr_warn("Unable to map EFI config table array.\n");
retval = -ENOMEM;
goto out;
}
retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
sizeof(efi_config_table_64_t), NULL);
early_memunmap(config_tables, table_size);
out:
early_memunmap(efi.systab, sizeof(efi_system_table_t));
return retval;
}
/*
* Return true for RAM regions we want to permanently reserve.
*/
static __init int is_reserve_region(efi_memory_desc_t *md)
{
switch (md->type) {
case EFI_LOADER_CODE:
case EFI_LOADER_DATA:
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
return 0;
default:
break;
}
return is_normal_ram(md);
}
static __init void reserve_regions(void)
{
efi_memory_desc_t *md;
u64 paddr, npages, size;
if (efi_enabled(EFI_DBG))
pr_info("Processing EFI memory map:\n");
for_each_efi_memory_desc(&memmap, md) {
paddr = md->phys_addr;
npages = md->num_pages;
if (efi_enabled(EFI_DBG)) {
char buf[64];
pr_info(" 0x%012llx-0x%012llx %s",
paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
efi_md_typeattr_format(buf, sizeof(buf), md));
}
memrange_efi_to_native(&paddr, &npages);
size = npages << PAGE_SHIFT;
if (is_normal_ram(md))
early_init_dt_add_memory_arch(paddr, size);
if (is_reserve_region(md)) {