mirror of
https://github.com/torvalds/linux.git
synced 2024-11-17 17:41:44 +00:00
e31aa453bb
Using LOAD_REG_IMMEDIATE to get the address of kernel symbols generates 5 instructions where LOAD_REG_ADDR can do it in one, and will generate R_PPC64_ADDR16_* relocations in the output when we get to making the kernel as a position-independent executable, which we'd rather not have to handle. This changes various bits of assembly code to use LOAD_REG_ADDR when we need to get the address of a symbol, or to use suitable position-independent code for cases where we can't access the TOC for various reasons, or if we're not running at the address we were linked at. It also cleans up a few minor things; there's no reason to save and restore SRR0/1 around RTAS calls, __mmu_off can get the return address from LR more conveniently than the caller can supply it in R4 (and we already assume elsewhere that EA == RA if the MMU is on in early boot), and enable_64b_mode was using 5 instructions where 2 would do. Signed-off-by: Paul Mackerras <paulus@samba.org>
193 lines
3.6 KiB
ArmAsm
/*
 * This file contains low level CPU setup functions.
 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/processor.h>
|
|
#include <asm/page.h>
|
|
#include <asm/cputable.h>
|
|
#include <asm/ppc_asm.h>
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/cache.h>
|
|
|
|
_GLOBAL(__cpu_preinit_ppc970)
|
|
/* Do nothing if not running in HV mode */
|
|
mfmsr r0
|
|
rldicl. r0,r0,4,63
|
|
beqlr
|
|
|
|
/* Make sure HID4:rm_ci is off before MMU is turned off, that large
|
|
* pages are enabled with HID4:61 and clear HID5:DCBZ_size and
|
|
* HID5:DCBZ32_ill
|
|
*/
|
|
li r0,0
|
|
mfspr r3,SPRN_HID4
|
|
rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */
|
|
rldimi r3,r0,2,61 /* clear bit 61 (lg_pg_en) */
|
|
sync
|
|
mtspr SPRN_HID4,r3
|
|
isync
|
|
sync
|
|
mfspr r3,SPRN_HID5
|
|
rldimi r3,r0,6,56 /* clear bits 56 & 57 (DCBZ*) */
|
|
sync
|
|
mtspr SPRN_HID5,r3
|
|
isync
|
|
sync
|
|
|
|
/* Setup some basic HID1 features */
|
|
mfspr r0,SPRN_HID1
|
|
li r3,0x1200 /* enable i-fetch cacheability */
|
|
sldi r3,r3,44 /* and prefetch */
|
|
or r0,r0,r3
|
|
mtspr SPRN_HID1,r0
|
|
mtspr SPRN_HID1,r0
|
|
isync
|
|
|
|
/* Clear HIOR */
|
|
li r0,0
|
|
sync
|
|
mtspr SPRN_HIOR,0 /* Clear interrupt prefix */
|
|
isync
|
|
blr
|
|
|
|
/* Definitions for the table use to save CPU states */
|
|
#define CS_HID0 0
|
|
#define CS_HID1 8
|
|
#define CS_HID4 16
|
|
#define CS_HID5 24
|
|
#define CS_SIZE 32
|
|
|
|
.data
|
|
.balign L1_CACHE_BYTES,0
|
|
cpu_state_storage:
|
|
.space CS_SIZE
|
|
.balign L1_CACHE_BYTES,0
|
|
.text
|
|
|
|
|
|
_GLOBAL(__setup_cpu_ppc970)
|
|
/* Do nothing if not running in HV mode */
|
|
mfmsr r0
|
|
rldicl. r0,r0,4,63
|
|
beqlr
|
|
|
|
mfspr r0,SPRN_HID0
|
|
li r11,5 /* clear DOZE and SLEEP */
|
|
rldimi r0,r11,52,8 /* set NAP and DPM */
|
|
li r11,0
|
|
rldimi r0,r11,32,31 /* clear EN_ATTN */
|
|
b load_hids /* Jump to shared code */
|
|
|
|
|
|
_GLOBAL(__setup_cpu_ppc970MP)
|
|
/* Do nothing if not running in HV mode */
|
|
mfmsr r0
|
|
rldicl. r0,r0,4,63
|
|
beqlr
|
|
|
|
mfspr r0,SPRN_HID0
|
|
li r11,0x15 /* clear DOZE and SLEEP */
|
|
rldimi r0,r11,52,6 /* set DEEPNAP, NAP and DPM */
|
|
li r11,0
|
|
rldimi r0,r11,32,31 /* clear EN_ATTN */
|
|
|
|
load_hids:
|
|
mtspr SPRN_HID0,r0
|
|
mfspr r0,SPRN_HID0
|
|
mfspr r0,SPRN_HID0
|
|
mfspr r0,SPRN_HID0
|
|
mfspr r0,SPRN_HID0
|
|
mfspr r0,SPRN_HID0
|
|
mfspr r0,SPRN_HID0
|
|
sync
|
|
isync
|
|
|
|
/* Save away cpu state */
|
|
LOAD_REG_ADDR(r5,cpu_state_storage)
|
|
|
|
/* Save HID0,1,4 and 5 */
|
|
mfspr r3,SPRN_HID0
|
|
std r3,CS_HID0(r5)
|
|
mfspr r3,SPRN_HID1
|
|
std r3,CS_HID1(r5)
|
|
mfspr r3,SPRN_HID4
|
|
std r3,CS_HID4(r5)
|
|
mfspr r3,SPRN_HID5
|
|
std r3,CS_HID5(r5)
|
|
|
|
blr
|
|
|
|
/* Called with no MMU context (typically MSR:IR/DR off) to
|
|
* restore CPU state as backed up by the previous
|
|
* function. This does not include cache setting
|
|
*/
|
|
_GLOBAL(__restore_cpu_ppc970)
|
|
/* Do nothing if not running in HV mode */
|
|
mfmsr r0
|
|
rldicl. r0,r0,4,63
|
|
beqlr
|
|
|
|
LOAD_REG_ADDR(r5,cpu_state_storage)
|
|
/* Before accessing memory, we make sure rm_ci is clear */
|
|
li r0,0
|
|
mfspr r3,SPRN_HID4
|
|
rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */
|
|
sync
|
|
mtspr SPRN_HID4,r3
|
|
isync
|
|
sync
|
|
|
|
/* Clear interrupt prefix */
|
|
li r0,0
|
|
sync
|
|
mtspr SPRN_HIOR,0
|
|
isync
|
|
|
|
/* Restore HID0 */
|
|
ld r3,CS_HID0(r5)
|
|
sync
|
|
isync
|
|
mtspr SPRN_HID0,r3
|
|
mfspr r3,SPRN_HID0
|
|
mfspr r3,SPRN_HID0
|
|
mfspr r3,SPRN_HID0
|
|
mfspr r3,SPRN_HID0
|
|
mfspr r3,SPRN_HID0
|
|
mfspr r3,SPRN_HID0
|
|
sync
|
|
isync
|
|
|
|
/* Restore HID1 */
|
|
ld r3,CS_HID1(r5)
|
|
sync
|
|
isync
|
|
mtspr SPRN_HID1,r3
|
|
mtspr SPRN_HID1,r3
|
|
sync
|
|
isync
|
|
|
|
/* Restore HID4 */
|
|
ld r3,CS_HID4(r5)
|
|
sync
|
|
isync
|
|
mtspr SPRN_HID4,r3
|
|
sync
|
|
isync
|
|
|
|
/* Restore HID5 */
|
|
ld r3,CS_HID5(r5)
|
|
sync
|
|
isync
|
|
mtspr SPRN_HID5,r3
|
|
sync
|
|
isync
|
|
blr
|
|
|