/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section(_ftrace_annotated_branch)	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section(_ftrace_branch)		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
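
/*
 * Illustrative usage sketch (hypothetical code, not part of this header):
 * likely()/unlikely() tell the compiler which way a branch usually goes so
 * it can keep the hot path as the fall-through. For example:
 *
 *	static int fill_item(struct item *it)
 *	{
 *		if (unlikely(!it))
 *			return -EINVAL;		// rare error path
 *		if (likely(it->cached))
 *			return 0;		// common fast path
 *		return slow_refill(it);
 *	}
 *
 * With CONFIG_TRACE_BRANCH_PROFILING enabled, the same annotations are
 * additionally routed through __branch_check__() above, which records how
 * often each prediction was correct.
 */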

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
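
/*
 * Usage sketch (illustrative): barrier_data() keeps a "dead" store alive,
 * e.g. when scrubbing a stack buffer that the optimizer would otherwise
 * see as never read again:
 *
 *	char key[32];
 *	...
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);	// the memset must not be elided
 */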

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif

#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
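
/*
 * Usage sketch (hypothetical code): unreachable() documents control flow
 * the compiler cannot prove dead, silencing bogus "control reaches end of
 * non-void function" warnings while letting objtool verify the claim:
 *
 *	switch (val & 0x3) {
 *	case 0: return do_a();
 *	case 1: return do_b();
 *	case 2: return do_c();
 *	case 3: return do_d();
 *	}
 *	unreachable();	// all four possible values handled above
 */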

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif
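
/*
 * Usage sketch (hypothetical): keep a symbol alive that is referenced only
 * from outside the compiler's view, e.g. an exception vector written in asm:
 *
 *	void early_fault_handler(void);
 *	KENTRY(early_fault_handler);	// emits a live reference the linker keeps
 */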

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
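
/*
 * Usage sketch (hypothetical, illustrative only): constant-time code often
 * needs to stop the optimizer from "seeing through" a mask and turning
 * branchless selection back into a branch:
 *
 *	unsigned long mask = 0UL - (a == b);	// all-ones or all-zeroes
 *	OPTIMIZER_HIDE_VAR(mask);		// value is now opaque to the compiler
 *	r = (x & mask) | (y & ~mask);		// stays branchless
 */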

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
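
/*
 * For example, __UNIQUE_ID(foo) used on line 42 of a file expands (with this
 * fallback) to __UNIQUE_ID_foo42. That is unique within one translation unit
 * but only "not-quite-unique" overall: two files can collide on the same
 * prefix and line number.
 */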

#include <linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining; attempting to inline it may cause a build failure:
 *	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy through the variable
 * '__u' allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	return *(unsigned long *)addr;
}
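
/*
 * Note: read_word_at_a_time() exists for word-at-a-time string code (such
 * as name hashing), which intentionally reads up to sizeof(unsigned long)
 * bytes past a NUL terminator. That read is safe for the allocation but
 * would trip KASAN's byte-granular checking, hence __no_kasan_or_inline
 * rather than a plain inline.
 */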

#define WRITE_ONCE(x, val)					\
({								\
	union { typeof(x) __val; char __c[1]; } __u =		\
		{ .__val = (__force typeof(x)) (val) };		\
	__write_once_size(&(x), __u.__c, sizeof(x));		\
	__u.__val;						\
})
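
/*
 * Usage sketch (hypothetical code): a flag shared between process context
 * and an interrupt handler on the same CPU. The marked accesses prevent the
 * compiler from caching, tearing, or refetching the value; they add no CPU
 * memory barriers:
 *
 *	static int wakeup_pending;
 *
 *	// in the irq handler
 *	WRITE_ONCE(wakeup_pending, 1);
 *
 *	// in process context
 *	while (!READ_ONCE(wakeup_pending))
 *		cpu_relax();
 */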

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(.discard.addressable) __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
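
/*
 * Usage sketch (hypothetical): a function whose address is consumed only by
 * inline asm still needs a compiler-visible reference so it is emitted:
 *
 *	static void patch_target(void) { ... }
 *	__ADDRESSABLE(patch_target);	// reference lands in .discard.addressable
 */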

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
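
/*
 * Worked example: if the 32-bit slot at address 0x1000 holds the value
 * 0x230, offset_to_ptr() returns (void *)(0x1000 + 0x230) = (void *)0x1230.
 * Storing targets as offsets relative to the slot itself keeps such tables
 * position-independent and half the size of absolute 64-bit pointers.
 */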

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
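
/*
 * Usage sketch (hypothetical): catch layout assumptions when compiling
 * rather than at run time:
 *
 *	compiletime_assert(sizeof(struct pkt_hdr) == 16,
 *			   "pkt_hdr must stay 16 bytes for the wire format");
 *
 * Under __OPTIMIZE__, a false condition leaves a call to a function that is
 * never defined, so the link fails and __compiletime_error() reports 'msg'.
 */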

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
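
/*
 * Illustrative note: this is what lets an ARRAY_SIZE()-style macro reject
 * pointers at compile time, e.g. (hypothetical sketch):
 *
 *	#define NELEMS(a) (sizeof(a) / sizeof((a)[0]) + __must_be_array(a))
 *
 * For a real array, (a) and &(a)[0] have different types and the term is 0;
 * for a pointer they are the same type, and BUILD_BUG_ON_ZERO() breaks the
 * build instead of silently computing a bogus element count.
 */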

#endif /* __LINUX_COMPILER_H */