This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "uClibc-ng - small C library for embedded systems".
The branch, master has been updated
       via  b985fa069187e4c5a7ee84213d9fbead2f219ce5 (commit)
       via  17ea4f9622a80cc8717beeefe1371ccbcd501fe3 (commit)
       via  0ef881ce9568e1c2e98351fdc067bebbc8c4696a (commit)
       via  f6cfc2129c6902410619f6cba56022af2977b6a3 (commit)
      from  a9a2380cf01cdae519fdaf8ab021d486c8917e43 (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log -----------------------------------------------------------------
commit b985fa069187e4c5a7ee84213d9fbead2f219ce5
Author: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Date:   Mon Nov 7 16:31:40 2016 -0800
NPTL/ARC: provide a kernel assisted atomic cmpxchg
For hardware configurations lacking LLOCK/SCOND (say ARC750), use a syscall to atomically do the cmpxchg. This is costly and painful, but really the only way out.
Note that the kernel only guarantees this to work in a UP configuration.
Reported-by: Avinash Patil <avinashp@quantenna.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
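As a rough stand-alone illustration of the fallback path (argument order taken from the bits/atomic.h diff below: r0 = mem, r1 = expected, r2 = new; the wrapper name here is hypothetical and assumes kernel 4.9+ ARC headers providing __NR_arc_usr_cmpxchg):

    /* Hypothetical sketch: kernel-assisted compare-and-exchange on an ARC
       core without LLOCK/SCOND.  The kernel atomically performs
       "if (*mem == expected) *mem = desired" and returns the previous
       value of *mem.  Only safe on UP systems, per the note above. */
    #include <unistd.h>
    #include <sys/syscall.h>

    static inline int
    arc_usr_cmpxchg(int *mem, int expected, int desired)
    {
        return syscall(__NR_arc_usr_cmpxchg, mem, expected, desired);
    }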
commit 17ea4f9622a80cc8717beeefe1371ccbcd501fe3
Author: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Date:   Mon Nov 7 16:31:39 2016 -0800
ARC: introduce explicit support for atomics
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
commit 0ef881ce9568e1c2e98351fdc067bebbc8c4696a
Author: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Date:   Mon Nov 7 16:31:38 2016 -0800
NPTL/ARC: implement __arch_exchange_32_acq using native EX
The ARC EX instruction maps directly to this primitive, which helps elide the LLOCK/SCOND-based retry loop where possible.
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
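For reference, the same idea as a stand-alone sketch (it mirrors the __arch_exchange_32_acq macro added in bits/atomic.h further down; the function name here is only illustrative):

    /* Atomic 32-bit exchange via the ARC EX instruction, which swaps a
       register with a memory word in a single operation, so no
       LLOCK/SCOND retry loop is needed. */
    static inline unsigned int
    arc_exchange_32(volatile unsigned int *mem, unsigned int newval)
    {
        unsigned int val = newval;

        __asm__ __volatile__("ex %0, [%1]"
                             : "+r" (val)   /* new value in, old value out */
                             : "r" (mem)
                             : "memory");
        return val;
    }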
commit f6cfc2129c6902410619f6cba56022af2977b6a3
Author: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Date:   Wed Nov 9 16:03:33 2016 -0800
ARC: string: handle gcc 6.x macro changes
In the gcc 6.x cleanup, the macros were renamed. (The old toggle still needs to be supported for some time.)
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
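The change boils down to accepting either spelling of the predefine, as in the memcpy.S/memset.S hunks below:

    /* Accept both the pre-gcc-6 toggle and the gcc 6.x renamed one. */
    #if defined(__LL64__) || defined(__ARC_LL64__)
        /* 64-bit ldd/std code paths enabled */
    #endif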
-----------------------------------------------------------------------
Summary of changes:
 extra/Configs/Config.arc             |  8 +++++
 libc/string/arc/arcv2/memcpy.S       |  2 +-
 libc/string/arc/arcv2/memset.S       |  4 +--
 libc/sysdeps/linux/arc/bits/atomic.h | 61 ++++++++++++++++++++++++++++++++++--
 4 files changed, 70 insertions(+), 5 deletions(-)
diff --git a/extra/Configs/Config.arc b/extra/Configs/Config.arc
index 0c0bc71..c263dbf 100644
--- a/extra/Configs/Config.arc
+++ b/extra/Configs/Config.arc
@@ -28,6 +28,14 @@ config CONFIG_ARC_CPU_HS
 
 endchoice
 
+config CONFIG_ARC_HAS_ATOMICS
+	bool "Support for LLOCK/SCOND instructions"
+	default y
+	help
+	  LLOCK/SCOND instructions are needed to implement atomic r-m-w
+	  Otherwise libc needs kernel assisted atomic cmpxchg available
+	  since v4.9 kernel
+
 choice
 	prompt "MMU Page Size"
 	default CONFIG_ARC_PAGE_SIZE_8K
diff --git a/libc/string/arc/arcv2/memcpy.S b/libc/string/arc/arcv2/memcpy.S
index 7573daf..ba29e87 100644
--- a/libc/string/arc/arcv2/memcpy.S
+++ b/libc/string/arc/arcv2/memcpy.S
@@ -23,7 +23,7 @@
 # define EXTRACT_2(RX,RY,IMM)	lsr	RX, RY, 0x08
 #endif
 
-#ifdef __LL64__
+#if defined(__LL64__) || defined(__ARC_LL64__)
 # define PREFETCH_READ(RX)	prefetch [RX, 56]
 # define PREFETCH_WRITE(RX)	prefetchw [RX, 64]
 # define LOADX(DST,RX)		ldd.ab	DST, [RX, 8]
diff --git a/libc/string/arc/arcv2/memset.S b/libc/string/arc/arcv2/memset.S
index 0918d37..343cfaf 100644
--- a/libc/string/arc/arcv2/memset.S
+++ b/libc/string/arc/arcv2/memset.S
@@ -52,7 +52,7 @@ ENTRY(memset)
 	lpnz	@.Lset64bytes
 	;; LOOP START
 	PREWRITE(r3, 64)	;Prefetch the next write location
-#ifdef __LL64__
+#if defined(__LL64__) || defined(__ARC_LL64__)
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
@@ -85,7 +85,7 @@ ENTRY(memset)
 	lpnz	.Lset32bytes
 	;; LOOP START
 	prefetchw [r3, 32]	;Prefetch the next write location
-#ifdef __LL64__
+#if defined(__LL64__) || defined(__ARC_LL64__)
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
diff --git a/libc/sysdeps/linux/arc/bits/atomic.h b/libc/sysdeps/linux/arc/bits/atomic.h
index 1fdc83f..5878609 100644
--- a/libc/sysdeps/linux/arc/bits/atomic.h
+++ b/libc/sysdeps/linux/arc/bits/atomic.h
@@ -38,6 +38,11 @@ void __arc_link_error (void);
 #define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
   ({ __arc_link_error (); oldval; })
 
+#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+  ({ __arc_link_error (); oldval; })
+
+#ifdef __CONFIG_ARC_HAS_ATOMICS__
+
 #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
   ({									\
 	__typeof(oldval) prev;						\
@@ -56,5 +61,57 @@ void __arc_link_error (void);
 	prev;								\
   })
 
-#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
-  ({ __arc_link_error (); oldval; })
+#else
+
+#ifndef __NR_arc_usr_cmpxchg
+#error "__NR_arc_usr_cmpxchg missing: Please upgrade to kernel 4.9+ headers"
+#endif
+
+/* With lack of hardware assist, use kernel to do the atomic operation
+   This will only work in a UP configuration
+ */
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval)	\
+  ({									\
+	/* opecode INTERNAL_SYSCALL as it lacks cc clobber */		\
+	register int __ret __asm__("r0") = (int)(mem);			\
+	register int __a1 __asm__("r1") = (int)(oldval);		\
+	register int __a2 __asm__("r2") = (int)(newval);		\
+	register int _sys_num __asm__("r8") = __NR_arc_usr_cmpxchg;	\
+									\
+	__asm__ volatile (						\
+		ARC_TRAP_INSN						\
+		: "+r" (__ret)						\
+		: "r"(_sys_num), "r"(__ret), "r"(__a1), "r"(__a2)	\
+		: "memory", "cc");					\
+									\
+	/* syscall returns previous value */				\
+	/* Z bit is set if cmpxchg succeeded (we don't use that yet) */	\
+									\
+	(__typeof(oldval)) __ret;					\
+  })
+
+#endif
+
+/* Store NEWVALUE in *MEM and return the old value.
+   Atomic EX is present in all configurations
+ */
+
+#define __arch_exchange_32_acq(mem, newval)				\
+  ({									\
+	__typeof__(*(mem)) val = newval;				\
+									\
+	__asm__ __volatile__(						\
+	"ex %0, [%1]"							\
+	: "+r" (val)							\
+	: "r" (mem)							\
+	: "memory" );							\
+									\
+	val;								\
+  })
+
+#define atomic_exchange_acq(mem, newval)				\
+  ({									\
+	if (sizeof(*(mem)) != 4)					\
+		abort();						\
+	__arch_exchange_32_acq(mem, newval);				\
+  })
hooks/post-receive