ARM: import arm-smccc code from Linux 4.11-rc6

Imports ARM SMC Calling Convention code from Linux 4.11-rc6.
The files have been copied as follows:

[Linux]                           [U-Boot]
arch/arm/kernel/smccc-call.S   -> arch/arm/cpu/armv7/smccc-call.S
arch/arm64/kernel/smccc-call.S -> arch/arm/cpu/armv8/smccc-call.S
arch/arm/include/asm/opcodes*  -> arch/arm/include/asm/opcodes*
include/linux/arm-smccc.h      -> include/linux/arm-smccc.h

Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Author:    Masahiro Yamada, 2017-04-14 11:10:22 +09:00
Committer: Tom Rini
Parent:    84a112a1a5
Commit:    c2da86f39e
6 changed files with 544 additions and 0 deletions

arch/arm/cpu/armv7/smccc-call.S (new file)
@@ -0,0 +1,64 @@
/*
* Copyright (c) 2015, Linaro Limited
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/linkage.h>
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>
/*
* Wrap c macros in asm macros to delay expansion until after the
* SMCCC asm macro is expanded.
*/
        .macro SMCCC_SMC
        __SMC(0)
        .endm

        .macro SMCCC_HVC
        __HVC(0)
        .endm

        .macro SMCCC instr
UNWIND( .fnstart)
        mov     r12, sp
        push    {r4-r7}
UNWIND( .save {r4-r7})
        ldm     r12, {r4-r7}
        \instr
        pop     {r4-r7}
        ldr     r12, [sp, #(4 * 4)]
        stm     r12, {r0-r3}
        bx      lr
UNWIND( .fnend)
        .endm
/*
* void smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
ENTRY(__arm_smccc_smc)
SMCCC SMCCC_SMC
ENDPROC(__arm_smccc_smc)
/*
* void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
ENTRY(__arm_smccc_hvc)
SMCCC SMCCC_HVC
ENDPROC(__arm_smccc_hvc)
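
As a rough illustration of the calling convention the stub above relies on: AAPCS places a0-a3 in r0-r3 and everything else on the caller's stack, which is why the macro snapshots sp into r12 before pushing r4-r7, loads a4-a7 with ldm, and fetches the res pointer from [sp, #16] after the pop. A minimal C sketch, using a made-up SiP function number:

#include <linux/arm-smccc.h>

void example_smc32_call(void)
{
        struct arm_smccc_res res;
        /* Illustrative fast SMC32 SiP call; 0x100 is not a real service. */
        unsigned long fid = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,
                                               ARM_SMCCC_SMC_32,
                                               ARM_SMCCC_OWNER_SIP, 0x100);

        __arm_smccc_smc(fid, 0, 0, 0,   /* a0-a3: passed in r0-r3 */
                        0, 0, 0, 0,     /* a4-a7: on the stack, read via r12 */
                        &res, NULL);    /* res pointer at [sp, #16] after the pop */

        /* res.a0-res.a3 now hold whatever the callee left in r0-r3. */
}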

arch/arm/cpu/armv8/smccc-call.S (new file)
@@ -0,0 +1,52 @@
/*
* Copyright (c) 2015, Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License Version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/linkage.h>
#include <linux/arm-smccc.h>
#include <asm/asm-offsets.h>
        .macro SMCCC instr
        .cfi_startproc
        \instr  #0
        ldr     x4, [sp]
        stp     x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
        stp     x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
        ldr     x4, [sp, #8]
        cbz     x4, 1f /* no quirk structure */
        ldr     x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS]
        cmp     x9, #ARM_SMCCC_QUIRK_QCOM_A6
        b.ne    1f
        str     x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
1:      ret
        .cfi_endproc
        .endm
/*
* void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
ENTRY(__arm_smccc_smc)
SMCCC smc
ENDPROC(__arm_smccc_smc)
/*
* void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
ENTRY(__arm_smccc_hvc)
SMCCC hvc
ENDPROC(__arm_smccc_hvc)
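
A quick sketch of the quirk path handled above, assuming a hypothetical firmware function ID: when the caller passes a struct arm_smccc_quirk whose id is ARM_SMCCC_QUIRK_QCOM_A6, the stub copies the post-SMC contents of x6 into quirk.state.a6 instead of discarding them. Both the structure and the arm_smccc_smc_quirk() wrapper come from include/linux/arm-smccc.h below.

#include <linux/arm-smccc.h>

void example_qcom_a6_quirk(unsigned long fid)   /* fid: hypothetical call ID */
{
        struct arm_smccc_res res;
        struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };

        /* Same as arm_smccc_smc(), but with an explicit quirk pointer. */
        arm_smccc_smc_quirk(fid, 0, 0, 0, 0, 0, 0, 0, &res, &quirk);

        /*
         * Because quirk.id matched, the assembly above stored the
         * post-call x6 into quirk.state.a6; with any other id (or a
         * NULL quirk pointer) x6 is simply ignored.
         */
        (void)quirk.state.a6;
        (void)res;
}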

arch/arm/include/asm/opcodes-sec.h (new file)
@@ -0,0 +1,24 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Copyright (C) 2012 ARM Limited
*/
#ifndef __ASM_ARM_OPCODES_SEC_H
#define __ASM_ARM_OPCODES_SEC_H
#include <asm/opcodes.h>
#define __SMC(imm4) __inst_arm_thumb32( \
0xE1600070 | (((imm4) & 0xF) << 0), \
0xF7F08000 | (((imm4) & 0xF) << 16) \
)
#endif /* __ASM_ARM_OPCODES_SEC_H */
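
The encodings baked into __SMC() can be sanity-checked on any host: for imm4 = 0 the ARM form is 0xE1600070 (SMC #0) and the Thumb-2 form is 0xF7F08000, with the 4-bit immediate OR'd into bits [3:0] and [19:16] respectively. A standalone check of that arithmetic, in plain C and independent of the kernel headers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as __SMC(imm4), written out as plain functions. */
static uint32_t smc_arm(uint32_t imm4)   { return 0xE1600070 | ((imm4 & 0xF) << 0); }
static uint32_t smc_thumb(uint32_t imm4) { return 0xF7F08000 | ((imm4 & 0xF) << 16); }

int main(void)
{
        /* SMC #0 is what the SMCCC_SMC macro emits via __SMC(0). */
        assert(smc_arm(0) == 0xE1600070);
        assert(smc_thumb(0) == 0xF7F08000);

        /* The immediate lands in bits [3:0] (ARM) and [19:16] (Thumb-2). */
        printf("SMC #5: arm=0x%08X thumb=0x%08X\n",
               (unsigned)smc_arm(5), (unsigned)smc_thumb(5));
        return 0;
}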

arch/arm/include/asm/opcodes-virt.h (new file)
@@ -0,0 +1,39 @@
/*
* opcodes-virt.h: Opcode definitions for the ARM virtualization extensions
* Copyright (C) 2012 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef __ASM_ARM_OPCODES_VIRT_H
#define __ASM_ARM_OPCODES_VIRT_H
#include <asm/opcodes.h>
#define __HVC(imm16) __inst_arm_thumb32( \
0xE1400070 | (((imm16) & 0xFFF0) << 4) | ((imm16) & 0x000F), \
0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF) \
)
#define __ERET __inst_arm_thumb32( \
0xE160006E, \
0xF3DE8F00 \
)
#define __MSR_ELR_HYP(regnum) __inst_arm_thumb32( \
0xE12EF300 | regnum, \
0xF3808E30 | (regnum << 16) \
)
#endif /* ! __ASM_ARM_OPCODES_VIRT_H */

arch/arm/include/asm/opcodes.h (new file)
@@ -0,0 +1,231 @@
/*
* arch/arm/include/asm/opcodes.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_OPCODES_H
#define __ASM_ARM_OPCODES_H
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
#endif
#define ARM_OPCODE_CONDTEST_FAIL 0
#define ARM_OPCODE_CONDTEST_PASS 1
#define ARM_OPCODE_CONDTEST_UNCOND 2
/*
* Assembler opcode byteswap helpers.
* These are only intended for use by this header: don't use them directly,
* because they will be suboptimal in most cases.
*/
#define ___asm_opcode_swab32(x) ( \
(((x) << 24) & 0xFF000000) \
| (((x) << 8) & 0x00FF0000) \
| (((x) >> 8) & 0x0000FF00) \
| (((x) >> 24) & 0x000000FF) \
)
#define ___asm_opcode_swab16(x) ( \
(((x) << 8) & 0xFF00) \
| (((x) >> 8) & 0x00FF) \
)
#define ___asm_opcode_swahb32(x) ( \
(((x) << 8) & 0xFF00FF00) \
| (((x) >> 8) & 0x00FF00FF) \
)
#define ___asm_opcode_swahw32(x) ( \
(((x) << 16) & 0xFFFF0000) \
| (((x) >> 16) & 0x0000FFFF) \
)
#define ___asm_opcode_identity32(x) ((x) & 0xFFFFFFFF)
#define ___asm_opcode_identity16(x) ((x) & 0xFFFF)
/*
* Opcode byteswap helpers
*
* These macros help with converting instructions between a canonical integer
* format and in-memory representation, in an endianness-agnostic manner.
*
* __mem_to_opcode_*() convert from in-memory representation to canonical form.
* __opcode_to_mem_*() convert from canonical form to in-memory representation.
*
*
* Canonical instruction representation:
*
* ARM: 0xKKLLMMNN
* Thumb 16-bit: 0x0000KKLL, where KK < 0xE8
* Thumb 32-bit: 0xKKLLMMNN, where KK >= 0xE8
*
* There is no way to distinguish an ARM instruction in canonical representation
* from a Thumb instruction (just as these cannot be distinguished in memory).
* Where this distinction is important, it needs to be tracked separately.
*
* Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not
* represent any valid Thumb-2 instruction. For this range,
* __opcode_is_thumb32() and __opcode_is_thumb16() will both be false.
*
* The ___asm variants are intended only for use by this header, in situations
* involving inline assembler. For .S files, the normal __opcode_*() macros
* should do the right thing.
*/
#ifdef __ASSEMBLY__
#define ___opcode_swab32(x) ___asm_opcode_swab32(x)
#define ___opcode_swab16(x) ___asm_opcode_swab16(x)
#define ___opcode_swahb32(x) ___asm_opcode_swahb32(x)
#define ___opcode_swahw32(x) ___asm_opcode_swahw32(x)
#define ___opcode_identity32(x) ___asm_opcode_identity32(x)
#define ___opcode_identity16(x) ___asm_opcode_identity16(x)
#else /* ! __ASSEMBLY__ */
#include <linux/types.h>
#include <linux/swab.h>
#define ___opcode_swab32(x) swab32(x)
#define ___opcode_swab16(x) swab16(x)
#define ___opcode_swahb32(x) swahb32(x)
#define ___opcode_swahw32(x) swahw32(x)
#define ___opcode_identity32(x) ((u32)(x))
#define ___opcode_identity16(x) ((u16)(x))
#endif /* ! __ASSEMBLY__ */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define __opcode_to_mem_arm(x) ___opcode_swab32(x)
#define __opcode_to_mem_thumb16(x) ___opcode_swab16(x)
#define __opcode_to_mem_thumb32(x) ___opcode_swahb32(x)
#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_swab32(x)
#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_swab16(x)
#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahb32(x)
#else /* ! CONFIG_CPU_ENDIAN_BE8 */
#define __opcode_to_mem_arm(x) ___opcode_identity32(x)
#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x)
#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x)
#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x)
#ifndef CONFIG_CPU_ENDIAN_BE32
/*
* On BE32 systems, using 32-bit accesses to store Thumb instructions will not
* work in all cases, due to alignment constraints. For now, a correct
* version is not provided for BE32.
*/
#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x)
#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x)
#endif
#endif /* ! CONFIG_CPU_ENDIAN_BE8 */
#define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x)
#define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x)
#ifndef CONFIG_CPU_ENDIAN_BE32
#define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x)
#endif
/* Operations specific to Thumb opcodes */
/* Instruction size checks: */
#define __opcode_is_thumb32(x) ( \
((x) & 0xF8000000) == 0xE8000000 \
|| ((x) & 0xF0000000) == 0xF0000000 \
)
#define __opcode_is_thumb16(x) ( \
((x) & 0xFFFF0000) == 0 \
&& !(((x) & 0xF800) == 0xE800 || ((x) & 0xF000) == 0xF000) \
)
/* Operations to construct or split 32-bit Thumb instructions: */
#define __opcode_thumb32_first(x) (___opcode_identity16((x) >> 16))
#define __opcode_thumb32_second(x) (___opcode_identity16(x))
#define __opcode_thumb32_compose(first, second) ( \
(___opcode_identity32(___opcode_identity16(first)) << 16) \
| ___opcode_identity32(___opcode_identity16(second)) \
)
#define ___asm_opcode_thumb32_first(x) (___asm_opcode_identity16((x) >> 16))
#define ___asm_opcode_thumb32_second(x) (___asm_opcode_identity16(x))
#define ___asm_opcode_thumb32_compose(first, second) ( \
(___asm_opcode_identity32(___asm_opcode_identity16(first)) << 16) \
| ___asm_opcode_identity32(___asm_opcode_identity16(second)) \
)
/*
* Opcode injection helpers
*
* In rare cases it is necessary to assemble an opcode which the
* assembler does not support directly, or which would normally be
* rejected because of the CFLAGS or AFLAGS used to build the affected
* file.
*
* Before using these macros, consider carefully whether it is feasible
* instead to change the build flags for your file, or whether it really
* makes sense to support old assembler versions when building that
* particular kernel feature.
*
* The macros defined here should only be used where there is no viable
* alternative.
*
*
* __inst_arm(x): emit the specified ARM opcode
* __inst_thumb16(x): emit the specified 16-bit Thumb opcode
* __inst_thumb32(x): emit the specified 32-bit Thumb opcode
*
* __inst_arm_thumb16(arm, thumb): emit either the specified arm or
* 16-bit Thumb opcode, depending on whether an ARM or Thumb-2
* kernel is being built
*
* __inst_arm_thumb32(arm, thumb): emit either the specified arm or
* 32-bit Thumb opcode, depending on whether an ARM or Thumb-2
* kernel is being built
*
*
* Note that using these macros directly is poor practice. Instead, you
* should use them to define human-readable wrapper macros to encode the
* instructions that you care about. In code which might run on ARMv7 or
* above, you can usually use the __inst_arm_thumb{16,32} macros to
* specify the ARM and Thumb alternatives at the same time. This ensures
* that the correct opcode gets emitted depending on the instruction set
* used for the kernel build.
*
* Look at opcodes-virt.h for an example of how to use these macros.
*/
#include <linux/stringify.h>
#define __inst_arm(x) ___inst_arm(___asm_opcode_to_mem_arm(x))
#define __inst_thumb32(x) ___inst_thumb32( \
___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_first(x)), \
___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_second(x)) \
)
#define __inst_thumb16(x) ___inst_thumb16(___asm_opcode_to_mem_thumb16(x))
#ifdef CONFIG_THUMB2_KERNEL
#define __inst_arm_thumb16(arm_opcode, thumb_opcode) \
__inst_thumb16(thumb_opcode)
#define __inst_arm_thumb32(arm_opcode, thumb_opcode) \
__inst_thumb32(thumb_opcode)
#else
#define __inst_arm_thumb16(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
#define __inst_arm_thumb32(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
#endif
/* Helpers for the helpers. Don't use these directly. */
#ifdef __ASSEMBLY__
#define ___inst_arm(x) .long x
#define ___inst_thumb16(x) .short x
#define ___inst_thumb32(first, second) .short first, second
#else
#define ___inst_arm(x) ".long " __stringify(x) "\n\t"
#define ___inst_thumb16(x) ".short " __stringify(x) "\n\t"
#define ___inst_thumb32(first, second) \
".short " __stringify(first) ", " __stringify(second) "\n\t"
#endif
#endif /* __ASM_ARM_OPCODES_H */
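
The canonical-form rules documented above can likewise be exercised standalone. The helpers below mirror __opcode_is_thumb32()/__opcode_is_thumb16() and the thumb32 split/compose macros (little-endian, non-BE8 case), using the Thumb-2 SMC #0 encoding from opcodes-sec.h as the test value:

#include <assert.h>
#include <stdint.h>

/* Plain-C mirrors of the helpers above, for illustration only. */
static int is_thumb32(uint32_t x)
{
        return ((x & 0xF8000000) == 0xE8000000) ||
               ((x & 0xF0000000) == 0xF0000000);
}

static int is_thumb16(uint32_t x)
{
        return (x & 0xFFFF0000) == 0 &&
               !(((x & 0xF800) == 0xE800) || ((x & 0xF000) == 0xF000));
}

static uint16_t t32_first(uint32_t x)  { return (uint16_t)(x >> 16); }
static uint16_t t32_second(uint32_t x) { return (uint16_t)x; }
static uint32_t t32_compose(uint16_t first, uint16_t second)
{
        return ((uint32_t)first << 16) | second;
}

int main(void)
{
        uint32_t smc0 = 0xF7F08000;     /* Thumb-2 SMC #0, canonical form */

        assert(is_thumb32(smc0) && !is_thumb16(smc0));
        assert(is_thumb16(0xBF00));     /* Thumb NOP is a 16-bit encoding */

        /* Splitting into halfwords and recomposing round-trips the opcode. */
        assert(t32_compose(t32_first(smc0), t32_second(smc0)) == smc0);
        return 0;
}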

include/linux/arm-smccc.h (new file)
@@ -0,0 +1,134 @@
/*
* Copyright (c) 2015, Linaro Limited
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H
/*
* This file provides common defines for ARM SMC Calling Convention as
* specified in
* http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
*/
#define ARM_SMCCC_STD_CALL 0
#define ARM_SMCCC_FAST_CALL 1
#define ARM_SMCCC_TYPE_SHIFT 31
#define ARM_SMCCC_SMC_32 0
#define ARM_SMCCC_SMC_64 1
#define ARM_SMCCC_CALL_CONV_SHIFT 30
#define ARM_SMCCC_OWNER_MASK 0x3F
#define ARM_SMCCC_OWNER_SHIFT 24
#define ARM_SMCCC_FUNC_MASK 0xFFFF
#define ARM_SMCCC_IS_FAST_CALL(smc_val) \
((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT))
#define ARM_SMCCC_IS_64(smc_val) \
((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT))
#define ARM_SMCCC_FUNC_NUM(smc_val) ((smc_val) & ARM_SMCCC_FUNC_MASK)
#define ARM_SMCCC_OWNER_NUM(smc_val) \
(((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK)
#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \
(((type) << ARM_SMCCC_TYPE_SHIFT) | \
((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \
(((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \
((func_num) & ARM_SMCCC_FUNC_MASK))
#define ARM_SMCCC_OWNER_ARCH 0
#define ARM_SMCCC_OWNER_CPU 1
#define ARM_SMCCC_OWNER_SIP 2
#define ARM_SMCCC_OWNER_OEM 3
#define ARM_SMCCC_OWNER_STANDARD 4
#define ARM_SMCCC_OWNER_TRUSTED_APP 48
#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49
#define ARM_SMCCC_OWNER_TRUSTED_OS 50
#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
#define ARM_SMCCC_QUIRK_NONE 0
#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
#include <linux/types.h>
/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
*/
struct arm_smccc_res {
unsigned long a0;
unsigned long a1;
unsigned long a2;
unsigned long a3;
};
/**
* struct arm_smccc_quirk - Contains quirk information
* @id: quirk identification
* @state: quirk specific information
* @a6: Qualcomm quirk entry for returning post-smc call contents of a6
*/
struct arm_smccc_quirk {
int id;
union {
unsigned long a6;
} state;
};
/**
* __arm_smccc_smc() - make SMC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
* @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required.
*
* This function is used to make SMC calls following SMC Calling Convention.
* The content of the supplied param are copied to registers 0 to 7 prior
* to the SMC instruction. The return values are updated with the content
* from register 0 to 3 on return from the SMC instruction. An optional
* quirk structure provides vendor specific behavior.
*/
asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
/**
* __arm_smccc_hvc() - make HVC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
* @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required.
*
* This function is used to make HVC calls following SMC Calling
* Convention. The content of the supplied param are copied to registers 0
* to 7 prior to the HVC instruction. The return values are updated with
* the content from register 0 to 3 on return from the HVC instruction. An
* optional quirk structure provides vendor specific behavior.
*/
asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
#define arm_smccc_smc(...) __arm_smccc_smc(__VA_ARGS__, NULL)
#define arm_smccc_smc_quirk(...) __arm_smccc_smc(__VA_ARGS__)
#define arm_smccc_hvc(...) __arm_smccc_hvc(__VA_ARGS__, NULL)
#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
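
A short usage sketch tying the pieces together: ARM_SMCCC_CALL_VAL() yields 0x84000000 for a fast SMC32 call to Standard Service function 0, which is the well-known PSCI_VERSION function ID; whether the call actually succeeds depends on the firmware behind the SMC. The arm_smccc_smc() wrapper appends the NULL quirk pointer automatically:

#include <linux/arm-smccc.h>

/* Fast SMC32, Standard Service owner, function 0 == 0x84000000 (PSCI_VERSION). */
#define EXAMPLE_PSCI_VERSION \
        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
                           ARM_SMCCC_OWNER_STANDARD, 0)

unsigned long example_psci_version(void)
{
        struct arm_smccc_res res;

        /* Unused arguments are passed as zero; a0-a3 of the return land in res. */
        arm_smccc_smc(EXAMPLE_PSCI_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);

        /* PSCI packs major in bits [31:16] and minor in bits [15:0] of a0. */
        return res.a0;
}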