OpenWrt source build errors (g_date_strftime, automake, compiler-gcc.h, cfns.gperf libc_name_p, etc.) and how to fix them with patches

Contents

1. gdate.c: In function 'g_date_strftime': error: format not a string literal, format string not checked [-Werror=format-nonliteral]

2. Unescaped left brace in regex is illegal here in regex; marked by <-- HERE in m/\${ <-- HERE ([^ \t=:+{}]+)}/ at ./bin/automake.tmp line 3938 (automake-1.15/.configured recipe fails)

3. cfns.gperf libc_name_p redeclared

4. Host compiler version too new (compiler-gcc.h)


Here I am only sharing a summary of the common build errors; the next post, on how to apply patches to OpenWrt, covers how to generate the patch files themselves.

1. gdate.c: In function 'g_date_strftime':
       gdate.c:2497:7: error: format not a string literal, format string not checked [-Werror=format-nonliteral]
       tmplen = strftime (tmpbuf, tmpbufsize, locale_format, &tm);
       ^~~~~~

Create a file named 001-glib-gdate-suppress-string-format-literal-warning.patch in the openwrt_widora/tools/pkg-config/patches directory, with the following content:

--- a/glib/glib/gdate.c
+++ b/glib/glib/gdate.c
@@ -2439,6 +2439,9 @@ win32_strftime_helper (const GDate     *d,
  *
  * Returns: number of characters written to the buffer, or 0 the buffer was too small
  */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+
 gsize     
 g_date_strftime (gchar       *s, 
                  gsize        slen, 
@@ -2549,3 +2552,5 @@ g_date_strftime (gchar       *s,
   return retval;
 #endif
 }
+
+#pragma GCC diagnostic pop
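
The patch simply wraps g_date_strftime() in GCC's diagnostic pragmas so that this one warning is no longer promoted to an error by -Werror. A minimal stand-alone sketch of the same technique (my own example, not glib code), which compiles cleanly even with gcc -Wall -Wformat -Wformat-nonliteral -Werror:

/* pragma_demo.c */
#include <stdio.h>
#include <time.h>

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-nonliteral"
/* 'fmt' is a variable rather than a string literal, so without the pragma
 * this strftime() call triggers -Wformat-nonliteral (fatal under -Werror). */
static size_t format_now(char *buf, size_t len, const char *fmt)
{
    time_t t = time(NULL);
    return strftime(buf, len, fmt, localtime(&t));
}
#pragma GCC diagnostic pop

int main(void)
{
    char buf[64];
    if (format_now(buf, sizeof buf, "%Y-%m-%d %H:%M:%S") > 0)
        puts(buf);
    return 0;
}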

2. Unescaped left brace in regex is illegal here in regex; marked by <-- HERE in m/\${ <-- HERE ([^ \t=:+{}]+)}/ at ./bin/automake.tmp line 3938.
Makefile:50: recipe for target '/home/sz/Desktop/openwrt_widora/build_dir/host/automake-1.15/.configured' failed


Perl 5.26 and newer treat an unescaped literal '{' in a regular expression as a fatal error, which is what breaks automake 1.15's substitute_ac_subst_variables here; the patch just escapes the brace. Create a file named 210-fix-bug-because-of-high-perl-version.patch in the openwrt_widora/tools/automake/patches directory, with the following content:

diff --git a/bin/automake.in b/bin/automake.in
index 0ee37149dd..8ce621d1af 100644
--- a/bin/automake.in
+++ b/bin/automake.in
@@ -3880,7 +3880,8 @@ sub substitute_ac_subst_variables_worker
 sub substitute_ac_subst_variables
 {
   my ($text) = @_;
-  $text =~ s/\${([^ \t=:+{}]+)}/substitute_ac_subst_variables_worker ($1)/ge;
+#  $text =~ s/\${([^ \t=:+{}]+)}/substitute_ac_subst_variables_worker ($1)/ge;
+  $text =~ s/\$[{]([^ \t=:+{}]+)}/substitute_ac_subst_variables_worker ($1)/ge;
   return $text;
 }

 

3. cfns.gperf libc_name_p redeclared

The fix is to comment out the conflicting libc_name_p() forward declaration in the gperf-generated cfns.h:
--- cfns1.h	2019-10-22 14:41:30.629369914 +0800
+++ cfns.h	2019-10-22 14:38:32.225369000 +0800
@@ -51,10 +51,10 @@
 __inline
 #endif
 static unsigned int hash (const char *, unsigned int);
-#ifdef __GNUC__
-__inline
-#endif
-const char * libc_name_p (const char *, unsigned int);
+//#ifdef __GNUC__
+//__inline
+//#endif
+//const char * libc_name_p (const char *, unsigned int);
 /* maximum key range = 391, duplicates = 0 */
 
 #ifdef __GNUC__
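
The underlying conflict is that the generated cfns.h first declares libc_name_p() as plain __inline and then defines it with the gnu_inline attribute, a mismatch that newer GCC rejects as a redeclaration; commenting out the first declaration leaves only the gnu_inline definition. A minimal sketch of that shape (my own illustration, not the real gperf output):

/* redecl_demo.c - the declaration/definition pair in miniature.
 * Re-adding the plain declaration below is the kind of mismatch that newer
 * GCC rejects in cfns.h; with only the gnu_inline definition, this compiles. */

/* __inline const char * libc_name_p (const char *, unsigned int); */

__inline __attribute__ ((__gnu_inline__))
const char *
libc_name_p (const char *str, unsigned int len)
{
  (void) len;          /* the real function hashes str and looks it up */
  return str;
}

int main (void)
{
  return libc_name_p ("memcpy", 6) ? 0 : 1;
}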

 

4. Host compiler version too new (compiler-gcc.h)

The fix is to backport newer kernel compiler headers: the patch below rewrites include/linux/compiler-gcc.h to gate features on GCC_VERSION instead of per-version headers, deletes compiler-gcc3.h and compiler-gcc4.h, and updates compiler-intel.h and compiler.h to match:
diff --git b/include/linux/compiler-gcc.h a/include/linux/compiler-gcc.h
index e057bd2..22ab246 100644
--- b/include/linux/compiler-gcc.h
+++ a/include/linux/compiler-gcc.h
@@ -5,14 +5,28 @@
 /*
  * Common definitions for all gcc versions go here.
  */
-#define GCC_VERSION (__GNUC__ * 10000 \
-		   + __GNUC_MINOR__ * 100 \
-		   + __GNUC_PATCHLEVEL__)
-
+#define GCC_VERSION (__GNUC__ * 10000		\
+		     + __GNUC_MINOR__ * 100	\
+		     + __GNUC_PATCHLEVEL__)
 
 /* Optimization barrier */
+
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
+/*
+ * This version is i.e. to prevent dead stores elimination on @ptr
+ * where gcc and llvm may behave differently when otherwise using
+ * normal barrier(): while gcc behavior gets along with a normal
+ * barrier(), llvm needs an explicit input variable to be assumed
+ * clobbered. The issue is as follows: while the inline asm might
+ * access any memory it wants, the compiler could have fit all of
+ * @ptr into memory registers instead, and since @ptr never escaped
+ * from that, it proofed that the inline asm wasn't touching any of
+ * it. This version works well with both compilers, i.e. we're telling
+ * the compiler that the inline asm absolutely may see the contents
+ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
 
 /*
  * This macro obfuscates arithmetic on a variable address so that gcc
@@ -32,58 +46,63 @@
  * the inline assembly constraint from =g to =r, in this particular
  * case either is valid.
  */
-#define RELOC_HIDE(ptr, off)					\
-  ({ unsigned long __ptr;					\
-    __asm__ ("" : "=r"(__ptr) : "0"(ptr));		\
-    (typeof(ptr)) (__ptr + (off)); })
+#define RELOC_HIDE(ptr, off)						\
+({									\
+	unsigned long __ptr;						\
+	__asm__ ("" : "=r"(__ptr) : "0"(ptr));				\
+	(typeof(ptr)) (__ptr + (off));					\
+})
 
 /* Make the optimizer believe the variable can be manipulated arbitrarily. */
-#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
+#define OPTIMIZER_HIDE_VAR(var)						\
+	__asm__ ("" : "=r" (var) : "0" (var))
 
 #ifdef __CHECKER__
-#define __must_be_array(arr) 0
+#define __must_be_array(a)	0
 #else
 /* &a[0] degrades to a pointer: a different type from an array */
-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 #endif
 
 /*
  * Force always-inline if the user requests it so via the .config,
  * or if gcc is too old:
  */
-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) ||		\
     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-# define inline		inline		__attribute__((always_inline)) notrace
-# define __inline__	__inline__	__attribute__((always_inline)) notrace
-# define __inline	__inline	__attribute__((always_inline)) notrace
+#define inline		inline		__attribute__((always_inline)) notrace
+#define __inline__	__inline__	__attribute__((always_inline)) notrace
+#define __inline	__inline	__attribute__((always_inline)) notrace
 #else
 /* A lot of inline functions can cause havoc with function tracing */
-# define inline		inline		notrace
-# define __inline__	__inline__	notrace
-# define __inline	__inline	notrace
+#define inline		inline		notrace
+#define __inline__	__inline__	notrace
+#define __inline	__inline	notrace
 #endif
 
-#define __deprecated			__attribute__((deprecated))
-#ifndef __packed
-#define __packed			__attribute__((packed))
-#endif
-#ifndef __weak
-#define __weak				__attribute__((weak))
-#endif
+#define __always_inline	inline __attribute__((always_inline))
+#define  noinline	__attribute__((noinline))
+
+#define __deprecated	__attribute__((deprecated))
+#define __packed	__attribute__((packed))
+#define __weak		__attribute__((weak))
+#define __alias(symbol)	__attribute__((alias(#symbol)))
 
 /*
- * it doesn't make sense on ARM (currently the only user of __naked) to trace
- * naked functions because then mcount is called without stack and frame pointer
- * being set up and there is no chance to restore the lr register to the value
- * before mcount was called.
+ * it doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+ * stack and frame pointer being set up and there is no chance to
+ * restore the lr register to the value before mcount was called.
+ *
+ * The asm() bodies of naked functions often depend on standard calling
+ * conventions, therefore they must be noinline and noclone.
  *
- * The asm() bodies of naked functions often depend on standard calling conventions,
- * therefore they must be noinline and noclone.  GCC 4.[56] currently fail to enforce
- * this, so we must do so ourselves.  See GCC PR44290.
+ * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
+ * See GCC PR44290.
  */
-#define __naked				__attribute__((naked)) noinline __noclone notrace
+#define __naked		__attribute__((naked)) noinline __noclone notrace
 
-#define __noreturn			__attribute__((noreturn))
+#define __noreturn	__attribute__((noreturn))
 
 /*
  * From the GCC manual:
@@ -95,34 +114,170 @@
  * would be.
  * [...]
  */
-#ifndef __pure
-#define __pure				__attribute__((pure))
+#define __pure			__attribute__((pure))
+#define __aligned(x)		__attribute__((aligned(x)))
+#define __printf(a, b)		__attribute__((format(printf, a, b)))
+#define __scanf(a, b)		__attribute__((format(scanf, a, b)))
+#define __attribute_const__	__attribute__((__const__))
+#define __maybe_unused		__attribute__((unused))
+#define __always_unused		__attribute__((unused))
+
+/* gcc version specific checks */
+
+#if GCC_VERSION < 30200
+# error Sorry, your compiler is too old - please upgrade it.
+#endif
+
+#if GCC_VERSION < 30300
+# define __used			__attribute__((__unused__))
+#else
+# define __used			__attribute__((__used__))
+#endif
+
+#ifdef CONFIG_GCOV_KERNEL
+# if GCC_VERSION < 30400
+#   error "GCOV profiling support for gcc versions below 3.4 not included"
+# endif /* __GNUC_MINOR__ */
+#endif /* CONFIG_GCOV_KERNEL */
+
+#if GCC_VERSION >= 30400
+#define __must_check		__attribute__((warn_unused_result))
+#endif
+
+#if GCC_VERSION >= 40000
+
+/* GCC 4.1.[01] miscompiles __weak */
+#ifdef __KERNEL__
+# if GCC_VERSION >= 40100 &&  GCC_VERSION <= 40101
+#  error Your version of gcc miscompiles the __weak directive
+# endif
+#endif
+
+#define __used			__attribute__((__used__))
+#define __compiler_offsetof(a, b)					\
+	__builtin_offsetof(a, b)
+
+#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+#endif
+
+#if GCC_VERSION >= 40300
+/* Mark functions as cold. gcc will assume any path leading to a call
+ * to them will be unlikely.  This means a lot of manual unlikely()s
+ * are unnecessary now for any paths leading to the usual suspects
+ * like BUG(), printk(), panic() etc. [but let's keep them for now for
+ * older compilers]
+ *
+ * Early snapshots of gcc 4.3 don't support this and we can't detect this
+ * in the preprocessor, but we can live with this because they're unreleased.
+ * Maketime probing would be overkill here.
+ *
+ * gcc also has a __attribute__((__hot__)) to move hot functions into
+ * a special section, but I don't see any sense in this right now in
+ * the kernel context
+ */
+#define __cold			__attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+#endif /* GCC_VERSION >= 40300 */
+
+#if GCC_VERSION >= 40500
+/*
+ * Mark a position in code as unreachable.  This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased.  Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone	__attribute__((__noclone__))
+
+#endif /* GCC_VERSION >= 40500 */
+
+#if GCC_VERSION >= 40600
+/*
+ * When used with Link Time Optimization, gcc can optimize away C functions or
+ * variables which are referenced only from assembly code.  __visible tells the
+ * optimizer that something else uses this function or variable, thus preventing
+ * this.
+ */
+#define __visible	__attribute__((externally_visible))
 #endif
-#ifndef __aligned
-#define __aligned(x)			__attribute__((aligned(x)))
+
+
+#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
+/*
+ * __assume_aligned(n, k): Tell the optimizer that the returned
+ * pointer can be assumed to be k modulo n. The second argument is
+ * optional (default 0), so we use a variadic macro to make the
+ * shorthand.
+ *
+ * Beware: Do not apply this to functions which may return
+ * ERR_PTRs. Also, it is probably unwise to apply it to functions
+ * returning extra information in the low bits (but in that case the
+ * compiler should see some alignment anyway, when the return value is
+ * massaged by 'flags = ptr & 3; ptr &= ~3;').
+ */
+#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
 #endif
-#define __printf(a, b)			__attribute__((format(printf, a, b)))
-#define __scanf(a, b)			__attribute__((format(scanf, a, b)))
-#define  noinline			__attribute__((noinline))
-#define __attribute_const__		__attribute__((__const__))
-#define __maybe_unused			__attribute__((unused))
-#define __always_unused			__attribute__((unused))
 
-#define __gcc_header(x) #x
-#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
-#define gcc_header(x) _gcc_header(x)
-#include gcc_header(__GNUC__)
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if GCC_VERSION >= 40400
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#if GCC_VERSION >= 50000
+#define KASAN_ABI_VERSION 4
+#elif GCC_VERSION >= 40902
+#define KASAN_ABI_VERSION 3
+#endif
+
+#if GCC_VERSION >= 40902
+/*
+ * Tell the compiler that address safety instrumentation (KASAN)
+ * should not be applied to that function.
+ * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ */
+#define __no_sanitize_address __attribute__((no_sanitize_address))
+#endif
+
+#endif	/* gcc version >= 40000 specific checks */
 
 #if !defined(__noclone)
 #define __noclone	/* not needed */
 #endif
 
+#if !defined(__no_sanitize_address)
+#define __no_sanitize_address
+#endif
+
 /*
  * A trick to suppress uninitialized variable warning without generating any
  * code
  */
 #define uninitialized_var(x) x = x
-
-#ifndef __always_inline
-#define __always_inline		inline __attribute__((always_inline))
-#endif
diff --git b/include/linux/compiler-gcc3.h a/include/linux/compiler-gcc3.h
deleted file mode 100644
index 7d89feb..0000000
--- b/include/linux/compiler-gcc3.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#if GCC_VERSION < 30200
-# error Sorry, your compiler is too old - please upgrade it.
-#endif
-
-#if GCC_VERSION >= 30300
-# define __used			__attribute__((__used__))
-#else
-# define __used			__attribute__((__unused__))
-#endif
-
-#if GCC_VERSION >= 30400
-#define __must_check		__attribute__((warn_unused_result))
-#endif
-
-#ifdef CONFIG_GCOV_KERNEL
-# if GCC_VERSION < 30400
-#   error "GCOV profiling support for gcc versions below 3.4 not included"
-# endif /* __GNUC_MINOR__ */
-#endif /* CONFIG_GCOV_KERNEL */
diff --git b/include/linux/compiler-gcc4.h a/include/linux/compiler-gcc4.h
deleted file mode 100644
index c982a09..0000000
--- b/include/linux/compiler-gcc4.h
+++ /dev/null
@@ -1,81 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#define __used			__attribute__((__used__))
-#define __must_check 		__attribute__((warn_unused_result))
-#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
-
-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
-# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-#endif
-
-#if GCC_VERSION >= 40300
-/* Mark functions as cold. gcc will assume any path leading to a call
-   to them will be unlikely.  This means a lot of manual unlikely()s
-   are unnecessary now for any paths leading to the usual suspects
-   like BUG(), printk(), panic() etc. [but let's keep them for now for
-   older compilers]
-
-   Early snapshots of gcc 4.3 don't support this and we can't detect this
-   in the preprocessor, but we can live with this because they're unreleased.
-   Maketime probing would be overkill here.
-
-   gcc also has a __attribute__((__hot__)) to move hot functions into
-   a special section, but I don't see any sense in this right now in
-   the kernel context */
-#define __cold			__attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-#endif /* GCC_VERSION >= 40300 */
-
-#if GCC_VERSION >= 40500
-/*
- * Mark a position in code as unreachable.  This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased.  Really, we need to have autoconf for the kernel.
- */
-#define unreachable() __builtin_unreachable()
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone	__attribute__((__noclone__))
-
-#endif /* GCC_VERSION >= 40500 */
-
-#if GCC_VERSION >= 40600
-/*
- * Tell the optimizer that something else uses this function or variable.
- */
-#define __visible __attribute__((externally_visible))
-#endif
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- * Fixed in GCC 4.8.2 and later versions.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
-
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
-#if GCC_VERSION >= 40400
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#endif
-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
-#define __HAVE_BUILTIN_BSWAP16__
-#endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git b/include/linux/compiler-intel.h a/include/linux/compiler-intel.h
index ba147a1..d4c7113 100644
--- b/include/linux/compiler-intel.h
+++ a/include/linux/compiler-intel.h
@@ -13,9 +13,14 @@
 /* Intel ECC compiler doesn't support gcc specific asm stmts.
  * It uses intrinsics to do the equivalent things.
  */
+#undef barrier
+#undef barrier_data
 #undef RELOC_HIDE
 #undef OPTIMIZER_HIDE_VAR
 
+#define barrier() __memory_barrier()
+#define barrier_data(ptr) barrier()
+
 #define RELOC_HIDE(ptr, off)					\
   ({ unsigned long __ptr;					\
      __ptr = (unsigned long) (ptr);				\
diff --git b/include/linux/compiler.h a/include/linux/compiler.h
index d5ad7b1..020ad16 100644
--- b/include/linux/compiler.h
+++ a/include/linux/compiler.h
@@ -17,6 +17,7 @@
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
 # define __percpu	__attribute__((noderef, address_space(3)))
+# define __pmem		__attribute__((noderef, address_space(5)))
 #ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu		__attribute__((noderef, address_space(4)))
 #else
@@ -42,6 +43,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __cond_lock(x,c) (c)
 # define __percpu
 # define __rcu
+# define __pmem
 #endif
 
 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
@@ -54,7 +56,11 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 #include <linux/compiler-gcc.h>
 #endif
 
+#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
+#define notrace __attribute__((hotpatch(0,0)))
+#else
 #define notrace __attribute__((no_instrument_function))
+#endif
 
 /* Intel compiler defines __GNUC__. So we will overwrite implementations
  * coming from above header files here
@@ -138,7 +144,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
  */
 #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
 #define __trace_if(cond) \
-	if (__builtin_constant_p((cond)) ? !!(cond) :			\
+	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
 	({								\
 		int ______r;						\
 		static struct ftrace_branch_data			\
@@ -165,6 +171,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define barrier() __memory_barrier()
 #endif
 
+#ifndef barrier_data
+# define barrier_data(ptr) barrier()
+#endif
+
 /* Unreachable code */
 #ifndef unreachable
 # define unreachable() do { } while (1)
@@ -186,6 +196,126 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
 #endif
 
+#include <linux/types.h>
+
+#define __READ_ONCE_SIZE						\
+({									\
+	switch (size) {							\
+	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
+	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
+	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
+	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
+	default:							\
+		barrier();						\
+		__builtin_memcpy((void *)res, (const void *)p, size);	\
+		barrier();						\
+	}								\
+})
+
+static __always_inline
+void __read_once_size(const volatile void *p, void *res, int size)
+{
+	__READ_ONCE_SIZE;
+}
+
+#ifdef CONFIG_KASAN
+/*
+ * This function is not 'inline' because __no_sanitize_address confilcts
+ * with inlining. Attempt to inline it may cause a build failure.
+ * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+static __no_sanitize_address __maybe_unused
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+	__READ_ONCE_SIZE;
+}
+#else
+static __always_inline
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+	__READ_ONCE_SIZE;
+}
+#endif
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
+	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)p, (const void *)res, size);
+		barrier();
+	}
+}
+
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
+ * compiler is aware of some particular ordering.  One way to make the
+ * compiler aware of ordering is to put the two invocations of READ_ONCE,
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ *
+ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
+ * data types like structs or unions. If the size of the accessed data
+ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
+ * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a
+ * compile-time warning.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+
+#define __READ_ONCE(x, check)						\
+({									\
+	union { typeof(x) __val; char __c[1]; } __u;			\
+	if (check)							\
+		__read_once_size(&(x), __u.__c, sizeof(x));		\
+	else								\
+		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
+	__u.__val;							\
+})
+#define READ_ONCE(x) __READ_ONCE(x, 1)
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
+ * to hide memory access from KASAN.
+ */
+#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
+
+#define WRITE_ONCE(x, val) \
+({							\
+	union { typeof(x) __val; char __c[1]; } __u =	\
+		{ .__val = (__force typeof(x)) (val) }; \
+	__write_once_size(&(x), __u.__c, sizeof(x));	\
+	__u.__val;					\
+})
+
+/**
+ * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using smp_load_acquire() on the condition variable but employs
+ * the control dependency of the wait to reduce the barrier on many platforms.
+ *
+ * The control dependency provides a LOAD->STORE order, the additional RMB
+ * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
+ * aka. ACQUIRE.
+ */
+#define smp_cond_acquire(cond)	do {		\
+	while (!(cond))				\
+		cpu_relax();			\
+	smp_rmb(); /* ctrl + rmb := acquire */	\
+} while (0)
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
@@ -304,6 +434,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 #define __visible
 #endif
 
+/*
+ * Assume alignment of return value.
+ */
+#ifndef __assume_aligned
+#define __assume_aligned(a, ...)
+#endif
+
+
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #ifndef __same_type
 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
@@ -311,7 +449,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 
 /* Is this type a native word size -- useful for atomic operations */
 #ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 #endif
 
 /* Compile time object size, -1 for unknown */
@@ -373,12 +511,38 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
  * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
  *
- * This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time.  Its main intended
- * use is to mediate communication between process-level code and irq/NMI
- * handlers, all running on the same CPU.
+ * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
+ * on a union member will work as long as the size of the member matches the
+ * size of the union and the size is smaller than word size.
+ *
+ * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
+ * between process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ *
+ * If possible use READ_ONCE()/WRITE_ONCE() instead.
+ */
+#define __ACCESS_ONCE(x) ({ \
+	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
+	(volatile typeof(x) *)&(x); })
+#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
+
+/**
+ * lockless_dereference() - safely load a pointer for later dereference
+ * @p: The pointer to load
+ *
+ * Similar to rcu_dereference(), but for situations where the pointed-to
+ * object's lifetime is managed by something other than RCU.  That
+ * "something other" might be reference counting or simple immortality.
 */
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define lockless_dereference(p) \
+({ \
+	typeof(p) _________p1 = READ_ONCE(p); \
+	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+	(_________p1); \
+})
 
 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
 #ifdef CONFIG_KPROBES
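
Every feature gate in this backported header keys off the single GCC_VERSION number, so it is worth knowing what value your host compiler yields. A small stand-alone probe (my own sketch; only the GCC_VERSION macro itself is taken from the patched header):

/* gccver_demo.c - print the GCC_VERSION the patched header would compute. */
#include <stdio.h>

#define GCC_VERSION (__GNUC__ * 10000 \
                     + __GNUC_MINOR__ * 100 \
                     + __GNUC_PATCHLEVEL__)

int main(void)
{
    /* e.g. gcc 5.4.0 -> 50400, gcc 7.3.0 -> 70300 */
    printf("GCC_VERSION = %d\n", GCC_VERSION);

#if GCC_VERSION >= 50000
    /* same condition as the "#if GCC_VERSION >= 50000" block in the patch */
    puts("host gcc is 5.0 or newer");
#endif
    return 0;
}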

 
