From 546c8c44f092b2f23291fe499c221efc8cabbb67 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 30 Mar 2016 17:43:07 +0200 Subject: arm64: move early boot code to the .init segment Apart from the arm64/linux and EFI header data structures, there is nothing in the .head.text section that must reside at the beginning of the Image. So let's move it to the .init section where it belongs. Note that this involves some minor tweaking of the EFI header, primarily because the address of 'stext' no longer coincides with the start of the .text section. It also requires a couple of relocated symbol references to be slightly rewritten or their definition moved to the linker script. Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) (limited to 'arch/arm64/kernel/head.S') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 4203d5f257bc..b43417618847 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -102,8 +102,6 @@ _head: #endif #ifdef CONFIG_EFI - .globl __efistub_stext_offset - .set __efistub_stext_offset, stext - _head .align 3 pe_header: .ascii "PE" @@ -123,11 +121,11 @@ optional_header: .short 0x20b // PE32+ format .byte 0x02 // MajorLinkerVersion .byte 0x14 // MinorLinkerVersion - .long _end - stext // SizeOfCode + .long _end - efi_header_end // SizeOfCode .long 0 // SizeOfInitializedData .long 0 // SizeOfUninitializedData .long __efistub_entry - _head // AddressOfEntryPoint - .long __efistub_stext_offset // BaseOfCode + .long efi_header_end - _head // BaseOfCode extra_header_fields: .quad 0 // ImageBase @@ -144,7 +142,7 @@ extra_header_fields: .long _end - _head // SizeOfImage // Everything before the kernel image is considered part of the header - .long __efistub_stext_offset // SizeOfHeaders + .long efi_header_end - _head // SizeOfHeaders .long 0 // CheckSum .short 0xa // Subsystem (EFI application) .short 0 // DllCharacteristics @@ -188,10 +186,10 @@ section_table: .byte 0 .byte 0 .byte 0 // end of 0 padding of section name - .long _end - stext // VirtualSize - .long __efistub_stext_offset // VirtualAddress - .long _edata - stext // SizeOfRawData - .long __efistub_stext_offset // PointerToRawData + .long _end - efi_header_end // VirtualSize + .long efi_header_end - _head // VirtualAddress + .long _edata - efi_header_end // SizeOfRawData + .long efi_header_end - _head // PointerToRawData .long 0 // PointerToRelocations (0 for executables) .long 0 // PointerToLineNumbers (0 for executables) @@ -200,15 +198,18 @@ section_table: .long 0xe0500020 // Characteristics (section flags) /* - * EFI will load stext onwards at the 4k section alignment + * EFI will load .text onwards at the 4k section alignment * described in the PE/COFF header. To ensure that instruction * sequences using an adrp and a :lo12: immediate will function - * correctly at this alignment, we must ensure that stext is + * correctly at this alignment, we must ensure that .text is * placed at a 4k boundary in the Image to begin with. */ .align 12 +efi_header_end: #endif + __INIT + ENTRY(stext) bl preserve_boot_args bl el2_setup // Drop to EL1, w20=cpu_boot_mode @@ -223,12 +224,12 @@ ENTRY(stext) * the TCR will have been set. 
 	 */
 	ldr	x27, 0f				// address to jump to after
 						// MMU has been enabled
 	adr_l	lr, __enable_mmu		// return (PIC) address
 	b	__cpu_setup			// initialise processor
 ENDPROC(stext)
 	.align	3
-0:	.quad	__mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR
+0:	.quad	__mmap_switched - (_text - TEXT_OFFSET) + KIMAGE_VADDR
 
 /*
  * Preserve the arguments passed by the bootloader in x0 .. x3
  */
@@ -397,7 +398,7 @@ __create_page_tables:
 	ldr	x5, =KIMAGE_VADDR
 	add	x5, x5, x23			// add KASLR displacement
 	create_pgd_entry x0, x5, x3, x6
-	ldr	w6, kernel_img_size
+	ldr	w6, =kernel_img_size
 	add	x6, x6, x5
 	mov	x3, x24				// phys offset
 	create_block_map x0, x7, x3, x5, x6
@@ -414,9 +415,6 @@ __create_page_tables:
 	ret	x28
 ENDPROC(__create_page_tables)
-
-kernel_img_size:
-	.long	_end - (_head - TEXT_OFFSET)
 
 	.ltorg
 
 /*
--
cgit v1.2.3


From 190c056fc32a528979807cb5d5a0d68285933073 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Mon, 18 Apr 2016 17:09:41 +0200
Subject: arm64: kernel: don't export local symbols from head.S

This unexports some symbols from head.S that are only used locally.

Acked-by: Catalin Marinas
Acked-by: Mark Rutland
Signed-off-by: Ard Biesheuvel
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/head.S | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arm64/kernel/head.S')

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b43417618847..ac27d8d937b2 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -638,7 +638,7 @@ ENDPROC(el2_setup)
  * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
  * in x20. See arch/arm64/include/asm/virt.h for more info.
  */
-ENTRY(set_cpu_boot_mode_flag)
+set_cpu_boot_mode_flag:
 	adr_l	x1, __boot_cpu_mode
 	cmp	w20, #BOOT_CPU_MODE_EL2
 	b.ne	1f
@@ -691,7 +691,7 @@ ENTRY(secondary_entry)
 	b	secondary_startup
 ENDPROC(secondary_entry)
 
-ENTRY(secondary_startup)
+secondary_startup:
 	/*
 	 * Common entry point for secondary CPUs.
 	 */
@@ -706,7 +706,7 @@ ENTRY(secondary_startup)
 ENDPROC(secondary_startup)
 0:	.long	(_text - TEXT_OFFSET) - __secondary_switched
 
-ENTRY(__secondary_switched)
+__secondary_switched:
 	adr_l	x5, vectors
 	msr	vbar_el1, x5
 	isb
--
cgit v1.2.3
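A note on the mechanics of the change above, with a simplified sketch
of the macro (the real definition in include/linux/linkage.h uses
ASM_NL as the statement separator): ENTRY() expands to roughly

	#define ENTRY(name) \
		.globl name; \
		ALIGN; \
		name:

so rewriting ENTRY(set_cpu_boot_mode_flag) as the bare label
'set_cpu_boot_mode_flag:' leaves the code itself unchanged, but stops
giving the symbol global linkage, and also drops the extra code
alignment that ALIGN inserts.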
From e5ebeec879b726c755af0c1c15f3699b53268cd5 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Mon, 18 Apr 2016 17:09:42 +0200
Subject: arm64: kernel: use literal for relocated address of __secondary_switched

We can simply use a relocated 64-bit literal to store the address of
__secondary_switched(), and the relocation code will ensure that it
holds the correct value at secondary entry time, as long as we make
sure that the literal is not dereferenced until after we have enabled
the MMU. So jump via a small __secondary_switch() function covered by
the ID map that performs the literal load and branch-to-register.

Acked-by: Catalin Marinas
Acked-by: Mark Rutland
Signed-off-by: Ard Biesheuvel
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/head.S | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'arch/arm64/kernel/head.S')

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index ac27d8d937b2..f13276d4ca91 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -468,9 +468,7 @@ __mmap_switched:
 	str	x15, [x11, x23]
 	b	0b
 
-2:	adr_l	x8, kimage_vaddr		// make relocated kimage_vaddr
-	dc	cvac, x8			// value visible to secondaries
-	dsb	sy				// with MMU off
+2:
 #endif
 
 	adr_l	sp, initial_sp, x4
@@ -699,12 +697,9 @@ secondary_startup:
 	adrp	x26, swapper_pg_dir
 	bl	__cpu_setup			// initialise processor
 
-	ldr	x8, kimage_vaddr
-	ldr	w9, 0f
-	sub	x27, x8, w9, sxtw		// address to jump to after enabling the MMU
+	adr_l	x27, __secondary_switch		// address to jump to after enabling the MMU
 	b	__enable_mmu
 ENDPROC(secondary_startup)
-0:	.long	(_text - TEXT_OFFSET) - __secondary_switched
 
 __secondary_switched:
 	adr_l	x5, vectors
@@ -806,3 +801,8 @@ __no_granule_support:
 	wfi
 	b	1b
 ENDPROC(__no_granule_support)
+
+__secondary_switch:
+	ldr	x8, =__secondary_switched
+	br	x8
+ENDPROC(__secondary_switch)
--
cgit v1.2.3


From 0cd3defe0af4153ffc5fe39bcfa4abfc301984e9 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Mon, 18 Apr 2016 17:09:43 +0200
Subject: arm64: kernel: perform relocation processing from ID map

Refactor the relocation processing so that the code executes from the
ID map while accessing the relocation tables via the virtual mapping.
This way, we can use literals containing virtual addresses as before,
instead of having to use convoluted absolute expressions.

For symmetry with the secondary code path, the relocation code and the
subsequent jump to the virtual entry point are implemented in a
function called __primary_switch(), and __mmap_switched() is renamed
to __primary_switched(). Also, the call sequence in stext() is aligned
with the one in secondary_startup(), by replacing the awkward
'adr_l lr' and 'b cpu_setup' sequence with a simple branch and link.

Acked-by: Catalin Marinas
Signed-off-by: Ard Biesheuvel
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/head.S | 96 +++++++++++++++++++++++++-----------------------
 1 file changed, 51 insertions(+), 45 deletions(-)

(limited to 'arch/arm64/kernel/head.S')

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index f13276d4ca91..0d487d90d221 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -223,13 +223,11 @@ ENTRY(stext)
 	 * On return, the CPU will be ready for the MMU to be turned on and
 	 * the TCR will have been set.
 	 */
-	ldr	x27, 0f				// address to jump to after
-						// MMU has been enabled
-	adr_l	lr, __enable_mmu		// return (PIC) address
-	b	__cpu_setup			// initialise processor
+	bl	__cpu_setup			// initialise processor
+	adr_l	x27, __primary_switch		// address to jump to after
+						// MMU has been enabled
+	b	__enable_mmu
 ENDPROC(stext)
-	.align	3
-0:	.quad	__mmap_switched - (_text - TEXT_OFFSET) + KIMAGE_VADDR
 
 /*
  * Preserve the arguments passed by the bootloader in x0 .. x3
@@ -421,7 +419,7 @@ ENDPROC(__create_page_tables)
  * The following fragment of code is executed with the MMU enabled.
*/ .set initial_sp, init_thread_union + THREAD_START_SP -__mmap_switched: +__primary_switched: mov x28, lr // preserve LR adr_l x8, vectors // load VBAR_EL1 with virtual msr vbar_el1, x8 // vector table address @@ -435,42 +433,6 @@ __mmap_switched: bl __pi_memset dsb ishst // Make zero page visible to PTW -#ifdef CONFIG_RELOCATABLE - - /* - * Iterate over each entry in the relocation table, and apply the - * relocations in place. - */ - adr_l x8, __dynsym_start // start of symbol table - adr_l x9, __reloc_start // start of reloc table - adr_l x10, __reloc_end // end of reloc table - -0: cmp x9, x10 - b.hs 2f - ldp x11, x12, [x9], #24 - ldr x13, [x9, #-8] - cmp w12, #R_AARCH64_RELATIVE - b.ne 1f - add x13, x13, x23 // relocate - str x13, [x11, x23] - b 0b - -1: cmp w12, #R_AARCH64_ABS64 - b.ne 0b - add x12, x12, x12, lsl #1 // symtab offset: 24x top word - add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word - ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx - ldr x15, [x12, #8] // Elf64_Sym::st_value - cmp w14, #-0xf // SHN_ABS (0xfff1) ? - add x14, x15, x23 // relocate - csel x15, x14, x15, ne - add x15, x13, x15 - str x15, [x11, x23] - b 0b - -2: -#endif - adr_l sp, initial_sp, x4 mov x4, sp and x4, x4, #~(THREAD_SIZE - 1) @@ -496,7 +458,7 @@ __mmap_switched: 0: #endif b start_kernel -ENDPROC(__mmap_switched) +ENDPROC(__primary_switched) /* * end early head section, begin head code that is also used for @@ -788,7 +750,6 @@ __enable_mmu: ic iallu // flush instructions fetched dsb nsh // via old mapping isb - add x27, x27, x23 // relocated __mmap_switched #endif br x27 ENDPROC(__enable_mmu) @@ -802,6 +763,51 @@ __no_granule_support: b 1b ENDPROC(__no_granule_support) +__primary_switch: +#ifdef CONFIG_RELOCATABLE + /* + * Iterate over each entry in the relocation table, and apply the + * relocations in place. + */ + ldr w8, =__dynsym_offset // offset to symbol table + ldr w9, =__rela_offset // offset to reloc table + ldr w10, =__rela_size // size of reloc table + + ldr x11, =KIMAGE_VADDR // default virtual offset + add x11, x11, x23 // actual virtual offset + add x8, x8, x11 // __va(.dynsym) + add x9, x9, x11 // __va(.rela) + add x10, x9, x10 // __va(.rela) + sizeof(.rela) + +0: cmp x9, x10 + b.hs 2f + ldp x11, x12, [x9], #24 + ldr x13, [x9, #-8] + cmp w12, #R_AARCH64_RELATIVE + b.ne 1f + add x13, x13, x23 // relocate + str x13, [x11, x23] + b 0b + +1: cmp w12, #R_AARCH64_ABS64 + b.ne 0b + add x12, x12, x12, lsl #1 // symtab offset: 24x top word + add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word + ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx + ldr x15, [x12, #8] // Elf64_Sym::st_value + cmp w14, #-0xf // SHN_ABS (0xfff1) ? + add x14, x15, x23 // relocate + csel x15, x14, x15, ne + add x15, x13, x15 + str x15, [x11, x23] + b 0b + +2: +#endif + ldr x8, =__primary_switched + br x8 +ENDPROC(__primary_switch) + __secondary_switch: ldr x8, =__secondary_switched br x8 -- cgit v1.2.3 From b03cc885328e3c0de61843737d42eb0a0f112aab Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 18 Apr 2016 17:09:45 +0200 Subject: arm64: kernel: replace early 64-bit literal loads with move-immediates When building a relocatable kernel, we currently rely on the fact that early 64-bit literal loads need to be deferred to after the relocation has been performed only if they involve symbol references, and not if they involve assemble time constants. 
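As an illustration of the distinction (a hand-written sketch, not part
of the original commit message), a literal load such as

	ldr	x5, =KIMAGE_VADDR		// loads a 64-bit slot from the
						// literal pool; a slot whose
						// expression names a symbol
						// needs a dynamic relocation

only holds the right value once any relocation on the pool slot has
been applied, whereas a move-immediate sequence such as

	movz	x5, #0xffff, lsl #48		// build a 64-bit constant,
	movk	x5, #0x8000, lsl #32		// e.g. 0xffff800000000000,
						// entirely from immediates

encodes the value in the instructions themselves.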
While this is not an unreasonable assumption to make, it is better to switch to movk/movz sequences, since these are guaranteed to be resolved at link time, simply because there are no dynamic relocation types to describe them. Acked-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kernel/head.S') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 0d487d90d221..dae9cabaadf5 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -337,7 +337,7 @@ __create_page_tables: cmp x0, x6 b.lo 1b - ldr x7, =SWAPPER_MM_MMUFLAGS + mov x7, SWAPPER_MM_MMUFLAGS /* * Create the identity mapping. @@ -393,7 +393,7 @@ __create_page_tables: * Map the kernel image (starting with PHYS_OFFSET). */ mov x0, x26 // swapper_pg_dir - ldr x5, =KIMAGE_VADDR + mov_q x5, KIMAGE_VADDR add x5, x5, x23 // add KASLR displacement create_pgd_entry x0, x5, x3, x6 ldr w6, =kernel_img_size @@ -631,7 +631,7 @@ ENTRY(secondary_holding_pen) bl el2_setup // Drop to EL1, w20=cpu_boot_mode bl set_cpu_boot_mode_flag mrs x0, mpidr_el1 - ldr x1, =MPIDR_HWID_BITMASK + mov_q x1, MPIDR_HWID_BITMASK and x0, x0, x1 adr_l x3, secondary_holding_pen_release pen: ldr x4, [x3] @@ -773,7 +773,7 @@ __primary_switch: ldr w9, =__rela_offset // offset to reloc table ldr w10, =__rela_size // size of reloc table - ldr x11, =KIMAGE_VADDR // default virtual offset + mov_q x11, KIMAGE_VADDR // default virtual offset add x11, x11, x23 // actual virtual offset add x8, x8, x11 // __va(.dynsym) add x9, x9, x11 // __va(.rela) -- cgit v1.2.3 From 18b9c0d641938242d8bcdba3c14a8f2beec2a97e Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 18 Apr 2016 17:09:46 +0200 Subject: arm64: don't map TEXT_OFFSET bytes below the kernel if we can avoid it For historical reasons, the kernel Image must be loaded into physical memory at a 512 KB offset above a 2 MB aligned base address. The region between the base address and the start of the kernel Image has no significance to the kernel itself, but it is currently mapped explicitly into the early kernel VMA range for all translation granules. In some cases (i.e., 4 KB granule), this is unavoidable, due to the 2 MB granularity of the early kernel mappings. However, in other cases, e.g., when running with larger page sizes, or in the future, with more granular KASLR, there is no reason to map it explicitly like we do currently. So update the logic so that the region is mapped only if that happens as a side effect of rounding the start address of the kernel to swapper block size, and leave it unmapped otherwise. Since the symbol kernel_img_size now simply resolves to the memory footprint of the kernel Image, we can drop its definition from image.h and opencode its calculation. Acked-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kernel/head.S') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index dae9cabaadf5..c5e5edca6897 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -393,12 +393,13 @@ __create_page_tables: * Map the kernel image (starting with PHYS_OFFSET). 
 	 */
 	mov	x0, x26				// swapper_pg_dir
-	mov_q	x5, KIMAGE_VADDR
+	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
 	add	x5, x5, x23			// add KASLR displacement
 	create_pgd_entry x0, x5, x3, x6
-	ldr	w6, =kernel_img_size
-	add	x6, x6, x5
-	mov	x3, x24				// phys offset
+	adrp	x6, _end			// runtime __pa(_end)
+	adrp	x3, _text			// runtime __pa(_text)
+	sub	x6, x6, x3			// _end - _text
+	add	x6, x6, x5			// runtime __va(_end)
 	create_block_map x0, x7, x3, x5, x6
 
 	/*
--
cgit v1.2.3


From 08cdac619c81b3fa8cd73aeed2330ffe0a0b73ca Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Mon, 18 Apr 2016 17:09:47 +0200
Subject: arm64: relocatable: deal with physically misaligned kernel images

When booting a relocatable kernel image, there is no practical reason
to refuse an image whose load address is not exactly TEXT_OFFSET bytes
above a 2 MB aligned base address, as long as the physical and virtual
misalignment with respect to the swapper block size are equal, and are
both aligned to THREAD_SIZE.

Since the virtual misalignment is under our control when we first
enter the kernel proper, we can simply choose its value to be equal to
the physical misalignment.

So treat the misalignment of the physical load address as the initial
KASLR offset, and fix up the remaining code to deal with that.

Acked-by: Catalin Marinas
Signed-off-by: Ard Biesheuvel
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/head.S | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'arch/arm64/kernel/head.S')

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c5e5edca6897..00a32101ab51 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -25,6 +25,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -213,8 +214,8 @@ efi_header_end:
 ENTRY(stext)
 	bl	preserve_boot_args
 	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
-	mov	x23, xzr			// KASLR offset, defaults to 0
 	adrp	x24, __PHYS_OFFSET
+	and	x23, x24, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
 	bl	set_cpu_boot_mode_flag
 	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
 	/*
@@ -449,11 +450,13 @@ __primary_switched:
 	bl	kasan_early_init
 #endif
 #ifdef CONFIG_RANDOMIZE_BASE
-	cbnz	x23, 0f				// already running randomized?
+	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
+	b.ne	0f
 	mov	x0, x21				// pass FDT address in x0
+	mov	x1, x23				// pass modulo offset in x1
 	bl	kaslr_early_init		// parse FDT for KASLR options
 	cbz	x0, 0f				// KASLR disabled? just proceed
-	mov	x23, x0				// record KASLR offset
+	orr	x23, x23, x0			// record KASLR offset
 	ret	x28				// we must enable KASLR, return
 						// to __enable_mmu()
 0:
--
cgit v1.2.3
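To put numbers on the new stext sequence (a worked example, not taken
from the patch): with the default TEXT_OFFSET of 0x80000 and a
MIN_KIMG_ALIGN of 2 MB, an image whose _text ends up at physical
address 0x40090000 yields

	adrp	x24, __PHYS_OFFSET		// x24 = 0x40090000 - 0x80000
						//     = 0x40010000
	and	x23, x24, MIN_KIMG_ALIGN - 1	// x23 = 0x40010000 & 0x1fffff
						//     = 0x10000

so the 64 KB physical misalignment is simply adopted as the initial
KASLR offset, and any randomized offset later returned by
kaslr_early_init() is OR-ed on top of it, as the 'orr x23, x23, x0'
line in the hunk above shows.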
From cabe1c81ea5be983425d117912d7883e252a3b09 Mon Sep 17 00:00:00 2001
From: James Morse
Date: Wed, 27 Apr 2016 17:47:07 +0100
Subject: arm64: Change cpu_resume() to enable mmu early then access sleep_sp by va

By enabling the MMU early in cpu_resume(), the sleep_save_sp and stack
can be accessed by VA, which avoids the need to convert addresses and
clean them to PoC on the suspend path.

MMU setup is shared with the boot path, meaning the swapper_pg_dir is
restored directly: ttbr1_el1 is no longer saved/restored.

struct sleep_save_sp is removed, replacing it with a single array of
pointers.

cpu_do_{suspend,resume} could be further reduced to not restore:
cpacr_el1, mdscr_el1, tcr_el1, vbar_el1 and sctlr_el1, all of which
are set by __cpu_setup(). However these values all contain res0 bits
that may be used to enable future features.

Signed-off-by: James Morse
Reviewed-by: Lorenzo Pieralisi
Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/head.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64/kernel/head.S')

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 00a32101ab51..0674384cfc74 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -716,7 +716,7 @@ ENTRY(__early_cpu_boot_status)
  * If it isn't, park the CPU
  */
 	.section	".idmap.text", "ax"
-__enable_mmu:
+ENTRY(__enable_mmu)
 	mrs	x22, sctlr_el1			// preserve old SCTLR_EL1 value
 	mrs	x1, ID_AA64MMFR0_EL1
 	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
--
cgit v1.2.3


From 28c7258330ee4ce701a4da7af96d6605d1a0b3bd Mon Sep 17 00:00:00 2001
From: James Morse
Date: Wed, 27 Apr 2016 17:47:09 +0100
Subject: arm64: Promote KERNEL_START/KERNEL_END definitions to a header file

KERNEL_START and KERNEL_END are useful outside head.S, move them to a
header file.

Signed-off-by: James Morse
Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/head.S | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'arch/arm64/kernel/head.S')

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0674384cfc74..564c340cef03 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -52,9 +52,6 @@
 #error TEXT_OFFSET must be less than 2MB
 #endif
 
-#define KERNEL_START	_text
-#define KERNEL_END	_end
-
 /*
  * Kernel startup entry point.
  * ---------------------------
--
cgit v1.2.3
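For reference, the two definitions removed in the last hunk presumably
reappear verbatim in their new home (arch/arm64/include/asm/memory.h
in this series):

	#define KERNEL_START	_text
	#define KERNEL_END	_end

which lets code outside head.S name the kernel image bounds without
redeclaring the linker script symbols.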