/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#include <asm/system.h>

#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
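/*
 * Page-aligned page tables used by the relocation code.  They are
 * passed to relocate_kernel() below, which uses them to identity-map
 * the control page while the new kernel is copied into place.
 */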
static u32 kexec_pgd[1024] PAGE_ALIGNED;
#ifdef CONFIG_X86_PAE
static u32 kexec_pmd0[1024] PAGE_ALIGNED;
static u32 kexec_pmd1[1024] PAGE_ALIGNED;
#endif
static u32 kexec_pte0[1024] PAGE_ALIGNED;
static u32 kexec_pte1[1024] PAGE_ALIGNED;

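/* Point the IDT register at newidt with the given limit. */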
static void set_idt(void *newidt, __u16 limit)
{
	struct desc_ptr curidt;

	/* ia32 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	load_idt(&curidt);
}


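/* Point the GDT register at newgdt with the given limit. */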
static void set_gdt(void *newgdt, __u16 limit)
{
	struct desc_ptr curgdt;

	/* ia32 supports unaligned loads & stores */
50 51
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	load_gdt(&curgdt);
}

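/*
 * Reload all segment registers with known kernel selectors so that
 * their hidden descriptor caches no longer depend on the GDT that is
 * about to be invalidated.
 */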
static void load_segments(void)
{
#define __STR(X) #X
#define STR(X) __STR(X)

	__asm__ __volatile__ (
		"\tljmp $"STR(__KERNEL_CS)",$1f\n"
		"\t1:\n"
		"\tmovl $"STR(__KERNEL_DS)",%%eax\n"
		"\tmovl %%eax,%%ds\n"
		"\tmovl %%eax,%%es\n"
		"\tmovl %%eax,%%fs\n"
		"\tmovl %%eax,%%gs\n"
		"\tmovl %%eax,%%ss\n"
		::: "eax", "memory");
#undef STR
#undef __STR
}

/*
 * An architecture hook called to validate the
 * proposed image and prepare the control pages
 * as needed.  The pages for KEXEC_CONTROL_CODE_SIZE
 * have been allocated, but the segments have not yet
 * been copied into the kernel.
 *
 * Do whatever setup is needed on the image and the
 * reboot code buffer to allow us to avoid allocations
 * later.
 *
 * Currently nothing.
 */
int machine_kexec_prepare(struct kimage *image)
{
	return 0;
}

/*
 * Undo anything leftover by machine_kexec_prepare
 * when an image is freed.
 */
void machine_kexec_cleanup(struct kimage *image)
{
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

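	/*
	 * Copy the relocate_kernel trampoline into the control page,
	 * where it can keep running safely while the rest of the old
	 * kernel is overwritten.
	 */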
	control_page = page_address(image->control_code_page);
	memcpy(control_page, relocate_kernel, PAGE_SIZE);

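	/*
	 * Gather the physical and virtual addresses that relocate_kernel()
	 * needs; once it runs from the control page the kernel's own
	 * symbols can no longer be referenced directly.
	 */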
	page_list[PA_CONTROL_PAGE] = __pa(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
	page_list[PA_PGD] = __pa(kexec_pgd);
	page_list[VA_PGD] = (unsigned long)kexec_pgd;
#ifdef CONFIG_X86_PAE
	page_list[PA_PMD_0] = __pa(kexec_pmd0);
	page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
	page_list[PA_PMD_1] = __pa(kexec_pmd1);
	page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
#endif
	page_list[PA_PTE_0] = __pa(kexec_pte0);
	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
	page_list[PA_PTE_1] = __pa(kexec_pte1);
	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;

	/* The segment registers are funny things, they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* Jump to the relocation code; it does not return. */
	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
			image->start, cpu_has_pae);
}

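/*
 * Export architecture-specific symbols and config options through the
 * vmcoreinfo note so that crash dump tools can interpret the dump.
 */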
void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
#ifdef CONFIG_X86_PAE
	VMCOREINFO_CONFIG(X86_PAE);
#endif
}